-rw-r--r--  .gitignore  10
-rw-r--r--  .stestr.conf  3
-rw-r--r--  .testr.conf  8
-rw-r--r--  .zuul.yaml  24
-rw-r--r--  README.rst  1
-rw-r--r--  api-ref/source/v1/stack-outputs.inc  2
-rw-r--r--  api-ref/source/v1/stacks.inc  14
-rwxr-xr-x  bin/heat-api-cloudwatch  46
-rwxr-xr-x  bin/heat-keystone-setup-domain  12
-rw-r--r--  config-generator.conf  1
-rw-r--r--  contrib/heat_docker/heat_docker/resources/docker_container.py  4
-rw-r--r--  contrib/heat_docker/requirements.txt  2
-rw-r--r--  contrib/rackspace/README.md  58
-rw-r--r--  contrib/rackspace/heat_keystoneclient_v2/client.py  255
-rw-r--r--  contrib/rackspace/heat_keystoneclient_v2/tests/__init__.py  5
-rw-r--r--  contrib/rackspace/heat_keystoneclient_v2/tests/test_client.py  274
-rw-r--r--  contrib/rackspace/rackspace/__init__.py  1
-rw-r--r--  contrib/rackspace/rackspace/clients.py  246
-rw-r--r--  contrib/rackspace/rackspace/resources/auto_scale.py  789
-rw-r--r--  contrib/rackspace/rackspace/resources/cloud_dns.py  216
-rw-r--r--  contrib/rackspace/rackspace/resources/cloud_loadbalancer.py  1198
-rw-r--r--  contrib/rackspace/rackspace/resources/cloud_server.py  309
-rw-r--r--  contrib/rackspace/rackspace/resources/cloudnetworks.py  165
-rw-r--r--  contrib/rackspace/rackspace/resources/lb_node.py  230
-rw-r--r--  contrib/rackspace/rackspace/tests/__init__.py  5
-rw-r--r--  contrib/rackspace/rackspace/tests/test_auto_scale.py  1219
-rw-r--r--  contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py  2202
-rw-r--r--  contrib/rackspace/rackspace/tests/test_cloudnetworks.py  199
-rw-r--r--  contrib/rackspace/rackspace/tests/test_lb_node.py  305
-rw-r--r--  contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py  662
-rw-r--r--  contrib/rackspace/rackspace/tests/test_rackspace_dns.py  316
-rw-r--r--  contrib/rackspace/requirements.txt  1
-rw-r--r--  contrib/rackspace/setup.cfg  43
-rw-r--r--  contrib/rackspace/setup.py  30
-rw-r--r--  devstack/lib/heat  57
-rw-r--r--  devstack/settings  2
-rwxr-xr-x  devstack/upgrade/resources.sh  41
-rw-r--r--  devstack/upgrade/settings  6
-rwxr-xr-x  devstack/upgrade/shutdown.sh  6
-rwxr-xr-x  devstack/upgrade/upgrade.sh  12
-rw-r--r--  doc/source/conf.py  16
-rw-r--r--  doc/source/configuration/api.rst  3
-rw-r--r--  doc/source/configuration/index.rst  2
-rw-r--r--  doc/source/configuration/sample_config.rst  12
-rw-r--r--  doc/source/configuration/sample_policy.rst  18
-rw-r--r--  doc/source/configuration/tables/heat-api.rst  2
-rw-r--r--  doc/source/configuration/tables/heat-cloudwatch_api.rst  42
-rw-r--r--  doc/source/configuration/tables/heat-common.rst  2
-rw-r--r--  doc/source/contributing/blueprints.rst  2
-rw-r--r--  doc/source/ext/resources.py  18
-rw-r--r--  doc/source/glossary.rst  2
-rw-r--r--  doc/source/index.rst  1
-rw-r--r--  doc/source/install/get_started.rst  3
-rw-r--r--  doc/source/man/heat-api-cloudwatch.rst  38
-rw-r--r--  doc/source/man/heat-manage.rst  2
-rw-r--r--  doc/source/man/index.rst  1
-rw-r--r--  doc/source/operating_guides/httpd.rst  11
-rw-r--r--  doc/source/template_guide/hot_spec.rst  12
-rw-r--r--  etc/heat/api-paste.ini  14
-rw-r--r--  etc/heat/heat-policy-generator.conf  3
-rw-r--r--  etc/heat/policy.json  97
-rw-r--r--  heat/api/aws/exception.py  1
-rw-r--r--  heat/api/cfn/v1/stacks.py  4
-rw-r--r--  heat/api/cloudwatch/__init__.py  67
-rw-r--r--  heat/api/cloudwatch/watch.py  321
-rw-r--r--  heat/api/middleware/ssl.py  45
-rw-r--r--  heat/api/openstack/__init__.py  8
-rw-r--r--  heat/api/openstack/v1/actions.py  5
-rw-r--r--  heat/api/openstack/v1/build_info.py  5
-rw-r--r--  heat/api/openstack/v1/events.py  7
-rw-r--r--  heat/api/openstack/v1/resources.py  13
-rw-r--r--  heat/api/openstack/v1/services.py  5
-rw-r--r--  heat/api/openstack/v1/software_configs.py  10
-rw-r--r--  heat/api/openstack/v1/software_deployments.py  12
-rw-r--r--  heat/api/openstack/v1/stacks.py  63
-rw-r--r--  heat/cmd/all.py  4
-rw-r--r--  heat/cmd/api_cloudwatch.py  78
-rw-r--r--  heat/cmd/engine.py  4
-rw-r--r--  heat/common/config.py  11
-rw-r--r--  heat/common/exception.py  5
-rw-r--r--  heat/common/grouputils.py  117
-rw-r--r--  heat/common/policy.py  37
-rw-r--r--  heat/common/timeutils.py  2
-rw-r--r--  heat/common/wsgi.py  40
-rw-r--r--  heat/db/sqlalchemy/api.py  96
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/086_drop_watch_rule_watch_data_tables.py  53
-rw-r--r--  heat/db/sqlalchemy/models.py  34
-rw-r--r--  heat/engine/api.py  3
-rw-r--r--  heat/engine/cfn/functions.py  2
-rw-r--r--  heat/engine/check_resource.py  6
-rw-r--r--  heat/engine/clients/client_exception.py  4
-rw-r--r--  heat/engine/clients/os/barbican.py  8
-rw-r--r--  heat/engine/clients/os/cinder.py  2
-rw-r--r--  heat/engine/clients/os/heat_plugin.py  25
-rw-r--r--  heat/engine/clients/os/monasca.py  9
-rw-r--r--  heat/engine/clients/os/nova.py  76
-rw-r--r--  heat/engine/clients/os/octavia.py  107
-rw-r--r--  heat/engine/clients/os/openstacksdk.py  52
-rw-r--r--  heat/engine/clients/os/senlin.py  54
-rw-r--r--  heat/engine/clients/os/zun.py  16
-rw-r--r--  heat/engine/constraint/heat_constraints.py  45
-rw-r--r--  heat/engine/constraints.py  36
-rw-r--r--  heat/engine/environment.py  2
-rw-r--r--  heat/engine/function.py  23
-rw-r--r--  heat/engine/hot/functions.py  14
-rw-r--r--  heat/engine/output.py  13
-rw-r--r--  heat/engine/parameters.py  33
-rw-r--r--  heat/engine/properties.py  27
-rw-r--r--  heat/engine/resource.py  246
-rw-r--r--  heat/engine/resources/aws/autoscaling/autoscaling_group.py  24
-rw-r--r--  heat/engine/resources/aws/cfn/stack.py  6
-rw-r--r--  heat/engine/resources/aws/ec2/eip.py  111
-rw-r--r--  heat/engine/resources/aws/lb/loadbalancer.py  6
-rw-r--r--  heat/engine/resources/openstack/aodh/alarm.py  32
-rw-r--r--  heat/engine/resources/openstack/heat/autoscaling_group.py  112
-rw-r--r--  heat/engine/resources/openstack/heat/cloud_watch.py  182
-rw-r--r--  heat/engine/resources/openstack/heat/ha_restarter.py  109
-rw-r--r--  heat/engine/resources/openstack/heat/instance_group.py  53
-rw-r--r--  heat/engine/resources/openstack/heat/random_string.py  6
-rw-r--r--  heat/engine/resources/openstack/heat/resource_chain.py  64
-rw-r--r--  heat/engine/resources/openstack/heat/resource_group.py  218
-rw-r--r--  heat/engine/resources/openstack/heat/scaling_policy.py  31
-rw-r--r--  heat/engine/resources/openstack/heat/software_deployment.py  25
-rw-r--r--  heat/engine/resources/openstack/keystone/project.py  20
-rw-r--r--  heat/engine/resources/openstack/magnum/cluster_template.py  40
-rw-r--r--  heat/engine/resources/openstack/mistral/external_resource.py  4
-rw-r--r--  heat/engine/resources/openstack/mistral/workflow.py  2
-rw-r--r--  heat/engine/resources/openstack/monasca/notification.py  2
-rw-r--r--  heat/engine/resources/openstack/neutron/rbac_policy.py  38
-rw-r--r--  heat/engine/resources/openstack/nova/floatingip.py  33
-rw-r--r--  heat/engine/resources/openstack/nova/server.py  3
-rw-r--r--  heat/engine/resources/openstack/nova/server_network_mixin.py  105
-rw-r--r--  heat/engine/resources/openstack/octavia/__init__.py (renamed from contrib/rackspace/heat_keystoneclient_v2/__init__.py)  0
-rw-r--r--  heat/engine/resources/openstack/octavia/health_monitor.py  170
-rw-r--r--  heat/engine/resources/openstack/octavia/l7policy.py  205
-rw-r--r--  heat/engine/resources/openstack/octavia/l7rule.py  148
-rw-r--r--  heat/engine/resources/openstack/octavia/listener.py  203
-rw-r--r--  heat/engine/resources/openstack/octavia/loadbalancer.py  163
-rw-r--r--  heat/engine/resources/openstack/octavia/octavia_base.py  95
-rw-r--r--  heat/engine/resources/openstack/octavia/pool.py  221
-rw-r--r--  heat/engine/resources/openstack/octavia/pool_member.py  153
-rw-r--r--  heat/engine/resources/openstack/sahara/cluster.py  19
-rw-r--r--  heat/engine/resources/openstack/sahara/job_binary.py  5
-rw-r--r--  heat/engine/resources/openstack/sahara/templates.py  5
-rw-r--r--  heat/engine/resources/openstack/trove/cluster.py  125
-rw-r--r--  heat/engine/resources/openstack/zun/container.py  104
-rw-r--r--  heat/engine/resources/stack_resource.py  63
-rw-r--r--  heat/engine/resources/template_resource.py  40
-rw-r--r--  heat/engine/resources/wait_condition.py  2
-rw-r--r--  heat/engine/rsrc_defn.py  28
-rw-r--r--  heat/engine/service.py  213
-rw-r--r--  heat/engine/service_software_config.py  19
-rw-r--r--  heat/engine/service_stack_watch.py  109
-rw-r--r--  heat/engine/stack.py  112
-rw-r--r--  heat/engine/sync_point.py  53
-rw-r--r--  heat/engine/template.py  11
-rw-r--r--  heat/engine/template_files.py  2
-rw-r--r--  heat/engine/translation.py  22
-rw-r--r--  heat/engine/watchrule.py  396
-rw-r--r--  heat/httpd/files/heat-api-cloudwatch-uwsgi.ini  14
-rw-r--r--  heat/httpd/files/heat-api-cloudwatch.conf  28
-rw-r--r--  heat/httpd/files/uwsgi-heat-api-cloudwatch.conf  2
-rw-r--r--  heat/httpd/heat_api_cloudwatch.py  51
-rw-r--r--  heat/locale/de/LC_MESSAGES/heat.po  94
-rw-r--r--  heat/locale/es/LC_MESSAGES/heat.po  84
-rw-r--r--  heat/locale/fr/LC_MESSAGES/heat.po  83
-rw-r--r--  heat/locale/it/LC_MESSAGES/heat.po  84
-rw-r--r--  heat/locale/ja/LC_MESSAGES/heat.po  82
-rw-r--r--  heat/locale/ko_KR/LC_MESSAGES/heat.po  82
-rw-r--r--  heat/locale/pt_BR/LC_MESSAGES/heat.po  90
-rw-r--r--  heat/locale/ru/LC_MESSAGES/heat.po  83
-rw-r--r--  heat/locale/zh_CN/LC_MESSAGES/heat.po  80
-rw-r--r--  heat/locale/zh_TW/LC_MESSAGES/heat.po  80
-rw-r--r--  heat/objects/event.py  2
-rw-r--r--  heat/objects/raw_template.py  2
-rw-r--r--  heat/objects/resource.py  10
-rw-r--r--  heat/objects/watch_data.py  60
-rw-r--r--  heat/objects/watch_rule.py  87
-rw-r--r--  heat/policies/__init__.py  20
-rw-r--r--  heat/policies/actions.py  37
-rw-r--r--  heat/policies/build_info.py  35
-rw-r--r--  heat/policies/cloudformation.py  66
-rw-r--r--  heat/policies/events.py  48
-rw-r--r--  heat/policies/resource.py  84
-rw-r--r--  heat/policies/resource_types.py  69
-rw-r--r--  heat/policies/service.py  27
-rw-r--r--  heat/policies/software_configs.py  79
-rw-r--r--  heat/policies/software_deployments.py  91
-rw-r--r--  heat/policies/stacks.py  370
-rw-r--r--  heat/rpc/api.py  60
-rw-r--r--  heat/rpc/client.py  54
-rw-r--r--  heat/scaling/cooldown.py  82
-rw-r--r--  heat/tests/api/cfn/test_api_cfn_v1.py  20
-rw-r--r--  heat/tests/api/cloudwatch/__init__.py  0
-rw-r--r--  heat/tests/api/cloudwatch/test_api_cloudwatch.py  539
-rw-r--r--  heat/tests/api/openstack_v1/test_stacks.py  2
-rw-r--r--  heat/tests/autoscaling/test_heat_scaling_group.py  249
-rw-r--r--  heat/tests/autoscaling/test_heat_scaling_policy.py  178
-rw-r--r--  heat/tests/autoscaling/test_scaling_group.py  125
-rw-r--r--  heat/tests/autoscaling/test_scaling_policy.py  144
-rw-r--r--  heat/tests/aws/test_eip.py  274
-rw-r--r--  heat/tests/aws/test_volume.py  3
-rw-r--r--  heat/tests/clients/test_clients.py  20
-rw-r--r--  heat/tests/clients/test_monasca_client.py  8
-rw-r--r--  heat/tests/clients/test_nova_client.py  7
-rw-r--r--  heat/tests/clients/test_octavia_client.py  24
-rw-r--r--  heat/tests/clients/test_sdk_client.py  3
-rw-r--r--  heat/tests/clients/test_senlin_client.py  36
-rw-r--r--  heat/tests/clients/test_swift_client.py  4
-rw-r--r--  heat/tests/clients/test_zun_client.py  2
-rw-r--r--  heat/tests/constraints/test_heat_constraints.py  82
-rw-r--r--  heat/tests/db/test_migrations.py  1
-rw-r--r--  heat/tests/db/test_sqlalchemy_api.py  132
-rw-r--r--  heat/tests/engine/service/test_software_config.py  1
-rw-r--r--  heat/tests/engine/service/test_stack_watch.py  270
-rw-r--r--  heat/tests/engine/test_resource_type.py  2
-rw-r--r--  heat/tests/engine/test_sync_point.py  7
-rw-r--r--  heat/tests/openstack/aodh/test_alarm.py  65
-rw-r--r--  heat/tests/openstack/designate/test_domain.py  2
-rw-r--r--  heat/tests/openstack/designate/test_record.py  2
-rw-r--r--  heat/tests/openstack/designate/test_recordset.py  2
-rw-r--r--  heat/tests/openstack/designate/test_zone.py  2
-rw-r--r--  heat/tests/openstack/heat/test_cloudwatch.py  120
-rw-r--r--  heat/tests/openstack/heat/test_cw_alarm.py  161
-rw-r--r--  heat/tests/openstack/heat/test_deployed_server.py  3
-rw-r--r--  heat/tests/openstack/heat/test_instance_group.py  48
-rw-r--r--  heat/tests/openstack/heat/test_random_string.py  4
-rw-r--r--  heat/tests/openstack/heat/test_resource_chain.py  175
-rw-r--r--  heat/tests/openstack/heat/test_resource_group.py  420
-rw-r--r--  heat/tests/openstack/heat/test_restarter.py  101
-rw-r--r--  heat/tests/openstack/heat/test_software_deployment.py  221
-rw-r--r--  heat/tests/openstack/heat/test_swiftsignal.py  6
-rw-r--r--  heat/tests/openstack/keystone/test_domain.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_endpoint.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_group.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_project.py  34
-rw-r--r--  heat/tests/openstack/keystone/test_region.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_role.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_service.py  2
-rw-r--r--  heat/tests/openstack/keystone/test_user.py  2
-rw-r--r--  heat/tests/openstack/magnum/test_cluster_template.py  10
-rw-r--r--  heat/tests/openstack/manila/test_share.py  2
-rw-r--r--  heat/tests/openstack/mistral/test_workflow.py  8
-rw-r--r--  heat/tests/openstack/monasca/test_alarm_definition.py  2
-rw-r--r--  heat/tests/openstack/monasca/test_notification.py  2
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_net.py  1
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_port.py  1
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_rbac_policy.py  27
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_router.py  1
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_subnet.py  1
-rw-r--r--  heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py  2
-rw-r--r--  heat/tests/openstack/neutron/test_sfc/test_port_chain.py  2
-rw-r--r--  heat/tests/openstack/neutron/test_sfc/test_port_pair.py  2
-rw-r--r--  heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py  2
-rw-r--r--  heat/tests/openstack/nova/fakes.py  2
-rw-r--r--  heat/tests/openstack/nova/test_floatingip.py  167
-rw-r--r--  heat/tests/openstack/nova/test_server.py  448
-rw-r--r--  heat/tests/openstack/octavia/__init__.py (renamed from contrib/rackspace/rackspace/resources/__init__.py)  0
-rw-r--r--  heat/tests/openstack/octavia/inline_templates.py  133
-rw-r--r--  heat/tests/openstack/octavia/test_health_monitor.py  149
-rw-r--r--  heat/tests/openstack/octavia/test_l7policy.py  263
-rw-r--r--  heat/tests/openstack/octavia/test_l7rule.py  178
-rw-r--r--  heat/tests/openstack/octavia/test_listener.py  187
-rw-r--r--  heat/tests/openstack/octavia/test_loadbalancer.py  170
-rw-r--r--  heat/tests/openstack/octavia/test_pool.py  200
-rw-r--r--  heat/tests/openstack/octavia/test_pool_member.py  167
-rw-r--r--  heat/tests/openstack/sahara/test_cluster.py  32
-rw-r--r--  heat/tests/openstack/senlin/test_cluster.py  4
-rw-r--r--  heat/tests/openstack/senlin/test_node.py  4
-rw-r--r--  heat/tests/openstack/senlin/test_policy.py  8
-rw-r--r--  heat/tests/openstack/senlin/test_receiver.py  4
-rw-r--r--  heat/tests/openstack/trove/test_cluster.py  19
-rw-r--r--  heat/tests/openstack/zun/test_container.py  53
-rw-r--r--  heat/tests/policy/deny_stack_user.json  12
-rw-r--r--  heat/tests/policy/resources.json  2
-rw-r--r--  heat/tests/test_common_policy.py  145
-rw-r--r--  heat/tests/test_convg_stack.py  24
-rw-r--r--  heat/tests/test_engine_service.py  2
-rw-r--r--  heat/tests/test_engine_service_stack_watch.py  118
-rw-r--r--  heat/tests/test_environment.py  2
-rw-r--r--  heat/tests/test_fault_middleware.py  2
-rw-r--r--  heat/tests/test_function.py  8
-rw-r--r--  heat/tests/test_grouputils.py  208
-rw-r--r--  heat/tests/test_hot.py  44
-rw-r--r--  heat/tests/test_metadata_refresh.py  1
-rw-r--r--  heat/tests/test_properties.py  13
-rw-r--r--  heat/tests/test_provider_template.py  3
-rw-r--r--  heat/tests/test_resource.py  2
-rw-r--r--  heat/tests/test_rpc_client.py  17
-rw-r--r--  heat/tests/test_signal.py  7
-rw-r--r--  heat/tests/test_stack.py  33
-rw-r--r--  heat/tests/test_stack_resource.py  4
-rw-r--r--  heat/tests/test_stack_update.py  8
-rw-r--r--  heat/tests/test_template.py  4
-rw-r--r--  heat/tests/test_template_format.py  2
-rw-r--r--  heat/tests/test_translation_rule.py  57
-rw-r--r--  heat/tests/test_validate.py  91
-rw-r--r--  heat/tests/test_watch.py  978
-rw-r--r--  heat_integrationtests/README.rst  28
-rw-r--r--  heat_integrationtests/__init__.py  61
-rw-r--r--  heat_integrationtests/api/__init__.py  0
-rw-r--r--  heat_integrationtests/api/gabbits/environments.yaml  55
-rw-r--r--  heat_integrationtests/api/gabbits/resources.yaml  90
-rw-r--r--  heat_integrationtests/api/gabbits/resourcetypes.yaml  24
-rw-r--r--  heat_integrationtests/api/gabbits/stacks.yaml  162
-rw-r--r--  heat_integrationtests/api/gabbits/templates.yaml  37
-rw-r--r--  heat_integrationtests/api/test_heat_api.py  44
-rwxr-xr-x  heat_integrationtests/cleanup_test_env.sh  2
-rw-r--r--  heat_integrationtests/common/clients.py  14
-rw-r--r--  heat_integrationtests/common/config.py  81
-rw-r--r--  heat_integrationtests/common/remote_client.py  202
-rw-r--r--  heat_integrationtests/common/test.py  48
-rw-r--r--  heat_integrationtests/config-generator.conf  4
-rw-r--r--  heat_integrationtests/functional/test_conditions.py  6
-rw-r--r--  heat_integrationtests/functional/test_create_update.py  10
-rw-r--r--  heat_integrationtests/functional/test_create_update_neutron_port.py  101
-rw-r--r--  heat_integrationtests/functional/test_create_update_neutron_subnet.py  127
-rw-r--r--  heat_integrationtests/functional/test_create_update_neutron_trunk.py  275
-rw-r--r--  heat_integrationtests/functional/test_encrypted_parameter.py  65
-rw-r--r--  heat_integrationtests/functional/test_encryption_vol_type.py  87
-rw-r--r--  heat_integrationtests/functional/test_event_sinks.py  79
-rw-r--r--  heat_integrationtests/functional/test_external_ref.py  83
-rw-r--r--  heat_integrationtests/functional/test_heat_autoscaling.py  6
-rw-r--r--  heat_integrationtests/functional/test_hooks.py  281
-rw-r--r--  heat_integrationtests/functional/test_lbaasv2.py  155
-rw-r--r--  heat_integrationtests/functional/test_nova_server_networks.py  149
-rw-r--r--  heat_integrationtests/functional/test_os_wait_condition.py  107
-rw-r--r--  heat_integrationtests/functional/test_preview.py  237
-rw-r--r--  heat_integrationtests/functional/test_reload_on_sighup.py  142
-rw-r--r--  heat_integrationtests/functional/test_remote_stack.py  144
-rw-r--r--  heat_integrationtests/functional/test_replace_deprecated.py  12
-rw-r--r--  heat_integrationtests/functional/test_resource_group.py  31
-rw-r--r--  heat_integrationtests/functional/test_resources_list.py  50
-rw-r--r--  heat_integrationtests/functional/test_software_config.py  283
-rw-r--r--  heat_integrationtests/functional/test_stack_events.py  109
-rw-r--r--  heat_integrationtests/functional/test_stack_outputs.py  155
-rw-r--r--  heat_integrationtests/functional/test_stack_tags.py  77
-rw-r--r--  heat_integrationtests/functional/test_template_validate.py  292
-rw-r--r--  heat_integrationtests/functional/test_template_versions.py  31
-rw-r--r--  heat_integrationtests/functional/test_templates.py  72
-rw-r--r--  heat_integrationtests/functional/test_unicode_template.py  122
-rw-r--r--  heat_integrationtests/functional/test_waitcondition.py  72
-rw-r--r--  heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po  8
-rw-r--r--  heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po  8
-rw-r--r--  heat_integrationtests/plugin.py  40
-rwxr-xr-x  heat_integrationtests/post_test_hook.sh  3
-rwxr-xr-x  heat_integrationtests/pre_test_hook.sh  5
-rwxr-xr-x  heat_integrationtests/prepare_test_env.sh  115
-rwxr-xr-x  heat_integrationtests/prepare_test_network.sh  5
-rw-r--r--  heat_integrationtests/scenario/__init__.py  0
-rw-r--r--  heat_integrationtests/scenario/scenario_base.py  63
-rw-r--r--  heat_integrationtests/scenario/templates/app_server_lbv2_neutron.yaml  69
-rw-r--r--  heat_integrationtests/scenario/templates/app_server_neutron.yaml  65
-rw-r--r--  heat_integrationtests/scenario/templates/boot_config_none_env.yaml  5
-rw-r--r--  heat_integrationtests/scenario/templates/netcat-webapp.yaml  35
-rw-r--r--  heat_integrationtests/scenario/templates/test_aodh_alarm.yaml  37
-rw-r--r--  heat_integrationtests/scenario/templates/test_autoscaling_lb_neutron.yaml  113
-rw-r--r--  heat_integrationtests/scenario/templates/test_autoscaling_lbv2_neutron.yaml  116
-rw-r--r--  heat_integrationtests/scenario/templates/test_base_resources.yaml  110
-rw-r--r--  heat_integrationtests/scenario/templates/test_server_cfn_init.yaml  97
-rw-r--r--  heat_integrationtests/scenario/templates/test_server_signal.yaml  107
-rw-r--r--  heat_integrationtests/scenario/templates/test_server_software_config.yaml  173
-rw-r--r--  heat_integrationtests/scenario/templates/test_volumes_create_from_backup.yaml  118
-rw-r--r--  heat_integrationtests/scenario/templates/test_volumes_delete_snapshot.yaml  124
-rw-r--r--  heat_integrationtests/scenario/test_aodh_alarm.py  64
-rw-r--r--  heat_integrationtests/scenario/test_autoscaling_lb.py  110
-rw-r--r--  heat_integrationtests/scenario/test_autoscaling_lbv2.py  110
-rw-r--r--  heat_integrationtests/scenario/test_base_resources.py  73
-rw-r--r--  heat_integrationtests/scenario/test_server_cfn_init.py  122
-rw-r--r--  heat_integrationtests/scenario/test_server_signal.py  85
-rw-r--r--  heat_integrationtests/scenario/test_server_software_config.py  171
-rw-r--r--  heat_integrationtests/scenario/test_volumes.py  129
-rwxr-xr-x  install.sh  1
-rw-r--r--  playbooks/devstack/functional/run.yaml  22
-rw-r--r--  playbooks/devstack/grenade/run.yaml  3
-rw-r--r--  releasenotes/notes/add-hostname-hints-security_groups-to-container-d3b69ae4b6f71fc7.yaml  5
-rw-r--r--  releasenotes/notes/deprecate-threshold-alarm-5738f5ab8aebfd20.yaml  5
-rw-r--r--  releasenotes/notes/drop-watch-rule-watch-data-tables-9ecb8da574611236.yaml  5
-rw-r--r--  releasenotes/notes/force-delete-nova-instance-6ed5d7fbd5b6f5fe.yaml  9
-rw-r--r--  releasenotes/notes/hidden-heat-harestarter-resource-a123479c317886a3.yaml  12
-rw-r--r--  releasenotes/notes/octavia-resources-0a25720e16dfe55d.yaml  19
-rw-r--r--  releasenotes/notes/policy-in-code-124372f6cdb0a497.yaml  15
-rw-r--r--  releasenotes/notes/project-tags-orchestration-If9125519e35f9f95ea8343cb07c377de9ccf5edf.yaml  5
-rw-r--r--  releasenotes/notes/remove-SSLMiddleware-2f15049af559f26a.yaml  7
-rw-r--r--  releasenotes/notes/remove-cloudwatch-api-149403251da97b41.yaml  7
-rw-r--r--  releasenotes/notes/remove-heat-resourcetype-constraint-b679618a149fc04e.yaml  4
-rw-r--r--  releasenotes/notes/resource_group_removal_policies_mode-d489e0cc49942e2a.yaml  6
-rw-r--r--  releasenotes/notes/set-networks-for-trove-cluster-b997a049eedbad17.yaml  3
-rw-r--r--  releasenotes/notes/sync-queens-releasenote-13f68851f7201e37.yaml  21
-rw-r--r--  releasenotes/notes/system-random-string-38a14ae2cb6f4a24.yaml  6
-rw-r--r--  releasenotes/source/conf.py  13
-rw-r--r--  releasenotes/source/index.rst  1
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po  276
-rw-r--r--  releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po  8
-rw-r--r--  releasenotes/source/queens.rst  6
-rw-r--r--  tools/README.rst  15
-rw-r--r--  requirements.txt  32
-rw-r--r--  setup.cfg  13
-rw-r--r--  test-requirements.txt  9
-rw-r--r--  tools/custom_guidelines.py  18
-rw-r--r--  tools/dashboards/heat.dash  33
-rw-r--r--  tox.ini  18
402 files changed, 9034 insertions, 22868 deletions
diff --git a/.gitignore b/.gitignore
index c307e065e..5babaa2c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@ tags
heat-test.db
heat.sqlite
.venv
+.stestr/*
AUTHORS
ChangeLog
templates/cloudformation-examples
@@ -24,9 +25,16 @@ etc/heat/heat.conf.sample
.idea
# integration tests requirements are auto-generated from stub file
heat_integrationtests/requirements.txt
+heat_integrationtests/heat_integrationtests.conf.sample
# generated policy file
-etc/heat/policy.json.sample
+etc/heat/policy.yaml.sample
+
+# sample policy file included in docs
+doc/source/_static/heat.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build
+
+# sample config included in docs
+doc/source/_static/heat.conf.sample
diff --git a/.stestr.conf b/.stestr.conf
new file mode 100644
index 000000000..0d94bfb92
--- /dev/null
+++ b/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=${TEST_PATH:-./heat/tests}
+top_dir=./
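The test_path value in the new .stestr.conf uses shell-style default expansion: stestr discovers tests under $TEST_PATH when that environment variable is set, and falls back to ./heat/tests otherwise. A minimal Python sketch of that ${VAR:-default} rule (illustrative only, not stestr's actual implementation):

    import os
    import re

    def expand_default(value, env=os.environ):
        # Expand ${VAR:-default}: use the environment value if set and
        # non-empty, otherwise fall back to the default after ':-'.
        pattern = re.compile(r'\$\{(\w+):-([^}]*)\}')
        return pattern.sub(lambda m: env.get(m.group(1)) or m.group(2), value)

    print(expand_default('${TEST_PATH:-./heat/tests}'))
    # -> ./heat/tests
    print(expand_default('${TEST_PATH:-./heat/tests}',
                         env={'TEST_PATH': './heat/tests/unit'}))
    # -> ./heat/tests/unit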
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 044f5a32a..000000000
--- a/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-test_command=
- PYTHON=$(echo ${PYTHON:-python} | sed 's/--source heat//g')
- START_AT=${TESTR_START_DIR:-.}
- ${PYTHON} -m subunit.run discover -s $START_AT -t . $LISTOPT $IDOPTION
- if [ "$START_AT" = "." ]; then for plugin in $START_AT/contrib/*; do ${PYTHON} -m subunit.run discover -s $plugin $LISTOPT $IDOPTION; done; fi
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/.zuul.yaml b/.zuul.yaml
index 266562974..c8c62ab5e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,8 +1,8 @@
- job:
name: heat-functional-devstack-base
parent: legacy-dsvm-base
- run: playbooks/devstack/functional/run
- post-run: playbooks/devstack/functional/post
+ run: playbooks/devstack/functional/run.yaml
+ post-run: playbooks/devstack/functional/post.yaml
timeout: 7800
required-projects:
- openstack-infra/devstack-gate
@@ -12,6 +12,7 @@
- openstack/ceilometer
- openstack/devstack-plugin-amqp1
- openstack/heat
+ - openstack/heat-tempest-plugin
- openstack/neutron
- openstack/neutron-lbaas
- openstack/oslo.messaging
@@ -56,14 +57,12 @@
- job:
name: heat-functional-convg-mysql-lbaasv2-non-apache
parent: heat-functional-devstack-base
- branches: ^(?!stable/newton).*$
vars:
use_apache: 0
- job:
name: heat-functional-convg-mysql-lbaasv2-py35
parent: heat-functional-devstack-base
- branches: ^(?!stable/(newton|ocata)).*$
vars:
use_python3: 1
@@ -78,14 +77,14 @@
- job:
name: grenade-heat
parent: legacy-dsvm-base
- run: playbooks/devstack/grenade/run
- post-run: playbooks/devstack/functional/post
+ run: playbooks/devstack/grenade/run.yaml
+ post-run: playbooks/devstack/functional/post.yaml
timeout: 7800
- branches: ^(?!(driverfixes|stable/(mitaka|newton))).*$
required-projects:
- openstack-dev/grenade
- openstack-infra/devstack-gate
- openstack/heat
+ - openstack/heat-tempest-plugin
irrelevant-files:
- ^(test-|)requirements.txt$
- ^.*\.rst$
@@ -105,16 +104,16 @@
nodeset: ubuntu-xenial-2-node
roles:
- zuul: openstack-infra/zuul-jobs
- pre-run: playbooks/devstack/multinode-networking/pre
+ pre-run: playbooks/devstack/multinode-networking/pre.yaml
voting: false
vars:
topology: multinode
- project:
- name: openstack/heat
check:
jobs:
- grenade-heat
+ - grenade-heat-multinode
- heat-functional-orig-mysql-lbaasv2
- heat-functional-convg-mysql-lbaasv2
- heat-functional-convg-mysql-lbaasv2-amqp1
@@ -130,7 +129,6 @@
- heat-functional-convg-mysql-lbaasv2-py35
experimental:
jobs:
- - grenade-heat-multinode
- experimental-tripleo:
- jobs:
- - tripleo-ci-centos-7-ovb-ha-oooq
+ - tripleo-ci-centos-7-scenario002-multinode-oooq
+ - tripleo-ci-centos-7-scenario002-multinode-oooq-container
+
diff --git a/README.rst b/README.rst
index 0983068bf..bfec82963 100644
--- a/README.rst
+++ b/README.rst
@@ -60,4 +60,3 @@ We have integration with
* https://git.openstack.org/cgit/openstack/python-mistralclient (workflow service)
* https://git.openstack.org/cgit/openstack/python-zaqarclient (messaging service)
* https://git.openstack.org/cgit/openstack/python-monascaclient (monitoring service)
-* https://git.openstack.org/cgit/openstack/python-senlinclient (clustering service)
diff --git a/api-ref/source/v1/stack-outputs.inc b/api-ref/source/v1/stack-outputs.inc
index c0d60702d..88ca9d735 100644
--- a/api-ref/source/v1/stack-outputs.inc
+++ b/api-ref/source/v1/stack-outputs.inc
@@ -85,7 +85,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- output: output
- output_key: output_key
- output_value: output_value
diff --git a/api-ref/source/v1/stacks.inc b/api-ref/source/v1/stacks.inc
index 66b1e85d4..3db8c3545 100644
--- a/api-ref/source/v1/stacks.inc
+++ b/api-ref/source/v1/stacks.inc
@@ -52,7 +52,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- location: location
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- stack: stack
- id: stack_id
- links: links
@@ -112,7 +112,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- capabilities: capabilities
- creation_time: creation_time
- deletion_time: deletion_time
@@ -805,7 +805,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- location: location
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- stack: stack
- id: stack_id
- links: links
@@ -855,7 +855,7 @@ The body of the response contains a map of file names and file contents.
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- action: action
- environment: stack_environment
- files: stack_files
@@ -910,7 +910,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- heat_template_version: heat_template_version
- outputs: template_outputs
- parameters: template_parameters
@@ -959,7 +959,7 @@ Response Parameters
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
- encrypted_param_names: encrypted_param_names
- event_sinks: event_sinks
- parameter_defaults: parameter_defaults
@@ -1010,7 +1010,7 @@ The body of the response contains a map of file names and file contents.
.. rest_parameters:: parameters.yaml
- - X-Openstack-Reqeuest-Id: request_id
+ - X-Openstack-Request-Id: request_id
Response Example
----------------
diff --git a/bin/heat-api-cloudwatch b/bin/heat-api-cloudwatch
deleted file mode 100755
index fc3129998..000000000
--- a/bin/heat-api-cloudwatch
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Heat cloudwatch API Server.
-
-This implements an approximation of the Amazon CloudWatch API and translates it
-into a native representation. It then calls the heat-engine via AMQP RPC to
-implement them.
-"""
-
-from oslo_log import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-LOG.warning('DEPRECATED: `heat-api-cloudwatch` script is deprecated. '
- 'Please use the system level heat binaries installed to '
- 'start any of the heat services.')
-
-import os
-import sys
-
-# If ../heat/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
- os.pardir,
- os.pardir))
-
-if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
- sys.path.insert(0, POSSIBLE_TOPDIR)
-
-from heat.cmd import api_cloudwatch
-
-api_cloudwatch.main()
diff --git a/bin/heat-keystone-setup-domain b/bin/heat-keystone-setup-domain
index d61393e10..ae17bcb74 100755
--- a/bin/heat-keystone-setup-domain
+++ b/bin/heat-keystone-setup-domain
@@ -29,7 +29,9 @@ DEBUG = False
USERNAME = os.environ.get('OS_USERNAME')
PASSWORD = os.environ.get('OS_PASSWORD')
AUTH_URL = os.environ.get('OS_AUTH_URL', '').replace('v2.0', 'v3')
-TENANT_NAME = os.environ.get('OS_TENANT_NAME')
+PROJECT_NAME = os.environ.get('OS_PROJECT_NAME')
+USER_DOMAIN_NAME = os.environ.get('OS_USER_DOMAIN_NAME')
+PROJECT_DOMAIN_NAME = os.environ.get('OS_PROJECT_DOMAIN_NAME')
opts = [
cfg.StrOpt('stack-user-domain-name',
@@ -82,9 +84,6 @@ HEAT_DOMAIN_PASSWORD = os.environ.get('HEAT_DOMAIN_PASSWORD',
cfg.CONF.stack_domain_admin_password)
HEAT_DOMAIN_DESCRIPTION = 'Contains users and projects created by heat'
-logger.debug("USERNAME=%s" % USERNAME)
-logger.debug("AUTH_URL=%s" % AUTH_URL)
-
CACERT = os.environ.get('OS_CACERT', cfg.CONF.os_cacert)
CERT = os.environ.get('OS_CERT', cfg.CONF.os_cert)
KEY = os.environ.get('OS_KEY', cfg.CONF.os_key)
@@ -98,9 +97,12 @@ def main():
'password': PASSWORD,
'auth_url': AUTH_URL,
'endpoint': AUTH_URL,
- 'tenant_name': TENANT_NAME
+ 'project_name': PROJECT_NAME,
+ 'user_domain_name': USER_DOMAIN_NAME,
+ 'project_domain_name': PROJECT_DOMAIN_NAME
}
+
if insecure:
client_kwargs['verify'] = False
else:
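With this change, bin/heat-keystone-setup-domain reads OS_PROJECT_NAME, OS_USER_DOMAIN_NAME and OS_PROJECT_DOMAIN_NAME instead of the Keystone v2-era OS_TENANT_NAME, so the client can be scoped the Keystone v3 way. A minimal sketch of the v3 password-auth pattern those variables feed, assuming keystoneauth1 and python-keystoneclient; this shows the generic session pattern, not the script's exact call:

    import os

    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    from keystoneclient.v3 import client

    # Build v3 credentials from the same environment variables the
    # script consumes.
    auth = v3.Password(
        auth_url=os.environ.get('OS_AUTH_URL', '').replace('v2.0', 'v3'),
        username=os.environ.get('OS_USERNAME'),
        password=os.environ.get('OS_PASSWORD'),
        project_name=os.environ.get('OS_PROJECT_NAME'),
        user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME'),
        project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME'),
    )
    # A project- and domain-scoped Keystone v3 client.
    keystone = client.Client(session=session.Session(auth=auth))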
diff --git a/config-generator.conf b/config-generator.conf
index 678f19d59..752851d66 100644
--- a/config-generator.conf
+++ b/config-generator.conf
@@ -9,7 +9,6 @@ namespace = heat.common.wsgi
namespace = heat.engine.clients
namespace = heat.engine.notification
namespace = heat.engine.resources
-namespace = heat.api.middleware.ssl
namespace = heat.api.aws.ec2token
namespace = keystonemiddleware.auth_token
namespace = oslo.messaging
diff --git a/contrib/heat_docker/heat_docker/resources/docker_container.py b/contrib/heat_docker/heat_docker/resources/docker_container.py
index 3c0368b90..b21225da6 100644
--- a/contrib/heat_docker/heat_docker/resources/docker_container.py
+++ b/contrib/heat_docker/heat_docker/resources/docker_container.py
@@ -332,9 +332,9 @@ class DockerContainer(resource.Resource):
if DOCKER_INSTALLED:
endpoint = self.properties.get(self.DOCKER_ENDPOINT)
if endpoint:
- client = docker.Client(endpoint)
+ client = docker.APIClient(endpoint)
else:
- client = docker.Client()
+ client = docker.APIClient()
return client
def _parse_networkinfo_ports(self, networkinfo):
diff --git a/contrib/heat_docker/requirements.txt b/contrib/heat_docker/requirements.txt
index ef8c26370..b72182632 100644
--- a/contrib/heat_docker/requirements.txt
+++ b/contrib/heat_docker/requirements.txt
@@ -1 +1 @@
-docker-py>=0.2.2
+docker>=2.4.2 # Apache-2.0
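docker-py's low-level docker.Client was renamed to docker.APIClient when the library was rebranded as docker in its 2.x series, which the bumped requirement above (docker>=2.4.2) pulls in. A minimal sketch of the renamed client; the endpoint URL is a placeholder:

    import docker

    # Explicit endpoint, as when the template sets DOCKER_ENDPOINT
    # (placeholder URL):
    client = docker.APIClient(base_url='tcp://127.0.0.1:2375')

    # No endpoint: the client defaults to the local daemon socket.
    client = docker.APIClient()
    print(client.version()['ApiVersion'])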
diff --git a/contrib/rackspace/README.md b/contrib/rackspace/README.md
deleted file mode 100644
index 04562d3a7..000000000
--- a/contrib/rackspace/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Heat resources for working with the Rackspace Cloud
-
-The resources and configuration in this module are for using Heat with the Rackspace Cloud. These resources either
-allow using Rackspace services that don't have equivalent services in OpenStack or account for differences between
-a generic OpenStack deployment and Rackspace Cloud.
-
-This package also includes a Keystone V2 compatible client plugin, that can be used in place of the default client
-for clouds running older versions of Keystone.
-
-## Installation
-
-### 1. Install the Rackspace plugins in Heat
-
-NOTE: These instructions assume the value of heat.conf plugin_dirs includes the
-default directory /usr/lib/heat.
-
-- To install the plugin, from this directory run:
-
- sudo python ./setup.py install
-
-- (Optional) If you want to enable the Keystone V2 client plugin, set the `keystone_backend` option to
-
- `heat.engine.plugins.heat_keystoneclient_v2.client.KeystoneClientV2`
-
-### 2. Restart heat
-
-Only the process "heat-engine" needs to be restarted to load the newly installed
-plugin.
-
-
-## Resources
-The following resources are provided for compatibility:
-
-* `Rackspace::Cloud::Server`:
->Provide compatibility with `OS::Nova::Server` and allow for working `user_data` and `Metadata`. This is deprecated and should be replaced with `OS::Nova::Server` once service compatibility is implemented by Rackspace.
-
-* `Rackspace::Cloud::LoadBalancer`:
->Use the Rackspace Cloud Loadbalancer service; not compatible with `OS::Neutron::LoadBalancer`.
-
-### Usage
-#### Templates
-#### Configuration
-
-
-## Heat Keystone V2
-
-Note that some forward compatibility decisions had to be made for the Keystone V2 client plugin:
-
-* Stack domain users are created as users on the stack owner's tenant
- rather than the stack's domain
-* Trusts are not supported
-
-### How it works
-
-By setting the `keystone_backend` option, the KeystoneBackend class in
-`heat/engine/clients/os/keystone/heat_keystoneclient.py` will instantiate the plugin
-KeystoneClientV2 class and use that instead of the default client in
-`heat/engine/clients/os/keystone/heat_keystoneclient.py`.
diff --git a/contrib/rackspace/heat_keystoneclient_v2/client.py b/contrib/rackspace/heat_keystoneclient_v2/client.py
deleted file mode 100644
index ee3f58ecf..000000000
--- a/contrib/rackspace/heat_keystoneclient_v2/client.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Client Library for Keystone Resources."""
-
-import weakref
-
-from keystoneclient.v2_0 import client as kc
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from heat.common import exception
-
-LOG = logging.getLogger('heat.common.keystoneclient')
-LOG.info("Keystone V2 loaded")
-
-
-class KeystoneClientV2(object):
- """Wrap keystone client so we can encapsulate logic used in resources.
-
- Note: This is intended to be initialized from a resource on a per-session
- basis, so the session context is passed in on initialization
- Also note that a copy of this is created every resource as self.keystone()
- via the code in engine/client.py, so there should not be any need to
- directly instantiate instances of this class inside resources themselves.
- """
-
- def __init__(self, context):
- # If a trust_id is specified in the context, we immediately
- # authenticate so we can populate the context with a trust token
- # otherwise, we delay client authentication until needed to avoid
- # unnecessary calls to keystone.
- #
- # Note that when you obtain a token using a trust, it cannot be
- # used to reauthenticate and get another token, so we have to
- # get a new trust-token even if context.auth_token is set.
- #
- # - context.auth_url is expected to contain the v2.0 keystone endpoint
- self._context = weakref.ref(context)
- self._client = None
-
- if self.context.trust_id:
- # Create a connection to the v2 API, with the trust_id, this
- # populates self.context.auth_token with a trust-scoped token
- self._client = self._v2_client_init()
-
- @property
- def context(self):
- ctxt = self._context()
- assert ctxt is not None, "Need a reference to the context"
- return ctxt
-
- @property
- def client(self):
- if not self._client:
- self._client = self._v2_client_init()
- return self._client
-
- def _v2_client_init(self):
- kwargs = {
- 'auth_url': self.context.auth_url,
- 'endpoint': self.context.auth_url,
- 'region_name': cfg.CONF.region_name_for_services
- }
-
- if self.context.region_name is not None:
- kwargs['region_name'] = self.context.region_name
-
- auth_kwargs = {}
- # Note try trust_id first, as we can't reuse auth_token in that case
- if self.context.trust_id is not None:
- # We got a trust_id, so we use the admin credentials
- # to authenticate, then re-scope the token to the
- # trust impersonating the trustor user.
- # Note that this currently requires the trustor tenant_id
- # to be passed to the authenticate(), unlike the v3 call
- kwargs.update(self._service_admin_creds())
- auth_kwargs['trust_id'] = self.context.trust_id
- auth_kwargs['tenant_id'] = self.context.tenant_id
- elif self.context.auth_token is not None:
- kwargs['tenant_name'] = self.context.project_name
- kwargs['token'] = self.context.auth_token
- elif self.context.password is not None:
- kwargs['username'] = self.context.username
- kwargs['password'] = self.context.password
- kwargs['tenant_name'] = self.context.project_name
- kwargs['tenant_id'] = self.context.tenant_id
- else:
- LOG.error("Keystone v2 API connection failed, no password "
- "or auth_token!")
- raise exception.AuthorizationFailure()
- kwargs['cacert'] = self._get_client_option('ca_file')
- kwargs['insecure'] = self._get_client_option('insecure')
- kwargs['cert'] = self._get_client_option('cert_file')
- kwargs['key'] = self._get_client_option('key_file')
- client = kc.Client(**kwargs)
-
- client.authenticate(**auth_kwargs)
- # If we are authenticating with a trust auth_kwargs are set, so set
- # the context auth_token with the re-scoped trust token
- if auth_kwargs:
- # Sanity check
- if not client.auth_ref.trust_scoped:
- LOG.error("v2 trust token re-scoping failed!")
- raise exception.AuthorizationFailure()
- # All OK so update the context with the token
- self.context.auth_token = client.auth_ref.auth_token
- self.context.auth_url = kwargs.get('auth_url')
- # Ensure the v2 API we're using is not impacted by keystone
- # bug #1239303, otherwise we can't trust the user_id
- if self.context.trustor_user_id != client.auth_ref.user_id:
- LOG.error("Trust impersonation failed, bug #1239303 "
- "suspected, you may need a newer keystone")
- raise exception.AuthorizationFailure()
-
- return client
-
- @staticmethod
- def _service_admin_creds():
- # Import auth_token to have keystone_authtoken settings setup.
- importutils.import_module('keystonemiddleware.auth_token')
-
- creds = {
- 'username': cfg.CONF.keystone_authtoken.admin_user,
- 'password': cfg.CONF.keystone_authtoken.admin_password,
- 'auth_url': cfg.CONF.keystone_authtoken.auth_uri,
- 'tenant_name': cfg.CONF.keystone_authtoken.admin_tenant_name,
- }
-
- return creds
-
- def _get_client_option(self, option):
- # look for the option in the [clients_keystone] section
- # unknown options raise cfg.NoSuchOptError
- cfg.CONF.import_opt(option, 'heat.common.config',
- group='clients_keystone')
- v = getattr(cfg.CONF.clients_keystone, option)
- if v is not None:
- return v
- # look for the option in the generic [clients] section
- cfg.CONF.import_opt(option, 'heat.common.config', group='clients')
- return getattr(cfg.CONF.clients, option)
-
- def create_stack_user(self, username, password=''):
- """Create a user.
-
- User can be defined as part of a stack, either via template
- or created internally by a resource. This user will be added to
- the heat_stack_user_role as defined in the config
- Returns the keystone ID of the resulting user
- """
- if len(username) > 64:
- LOG.warning("Truncating the username %s to the last 64 "
- "characters.", username)
- # get the last 64 characters of the username
- username = username[-64:]
- user = self.client.users.create(username,
- password,
- '%s@openstack.org' % username,
- tenant_id=self.context.tenant_id,
- enabled=True)
-
- # We add the new user to a special keystone role
- # This role is designed to allow easier differentiation of the
- # heat-generated "stack users" which will generally have credentials
- # deployed on an instance (hence are implicitly untrusted)
- roles = self.client.roles.list()
- stack_user_role = [r.id for r in roles
- if r.name == cfg.CONF.heat_stack_user_role]
- if len(stack_user_role) == 1:
- role_id = stack_user_role[0]
- LOG.debug("Adding user %(user)s to role %(role)s"
- % {'user': user.id, 'role': role_id})
- self.client.roles.add_user_role(user.id, role_id,
- self.context.tenant_id)
- else:
- LOG.error("Failed to add user %(user)s to role %(role)s, "
- "check role exists!",
- {'user': username,
- 'role': cfg.CONF.heat_stack_user_role})
-
- return user.id
-
- def delete_stack_user(self, user_id):
- self.client.users.delete(user_id)
-
- def delete_ec2_keypair(self, user_id, accesskey):
- self.client.ec2.delete(user_id, accesskey)
-
- def get_ec2_keypair(self, access, user_id=None):
- uid = user_id or self.client.auth_ref.user_id
- return self.client.ec2.get(uid, access)
-
- def create_ec2_keypair(self, user_id=None):
- uid = user_id or self.client.auth_ref.user_id
- return self.client.ec2.create(uid, self.context.tenant_id)
-
- def disable_stack_user(self, user_id):
- self.client.users.update_enabled(user_id, False)
-
- def enable_stack_user(self, user_id):
- self.client.users.update_enabled(user_id, True)
-
- def url_for(self, **kwargs):
- return self.client.service_catalog.url_for(**kwargs)
-
- @property
- def auth_token(self):
- return self.client.auth_token
-
- # ##################### #
- # V3 Compatible Methods #
- # ##################### #
-
- def create_stack_domain_user(self, username, project_id, password=None):
- return self.create_stack_user(username, password)
-
- def delete_stack_domain_user(self, user_id, project_id):
- return self.delete_stack_user(user_id)
-
- def create_stack_domain_project(self, project_id):
- """Use the tenant ID as domain project."""
- return self.context.tenant_id
-
- def delete_stack_domain_project(self, project_id):
- """Pass through method since no project was created."""
- pass
-
- def create_stack_domain_user_keypair(self, user_id, project_id):
- return self.create_ec2_keypair(user_id)
-
- def delete_stack_domain_user_keypair(self, user_id, project_id,
- credential_id):
- return self.delete_ec2_keypair(user_id, credential_id)
-
- # ###################### #
- # V3 Unsupported Methods #
- # ###################### #
-
- def create_trust_context(self):
- raise exception.NotSupported(feature='Keystone Trusts')
-
- def delete_trust(self, trust_id):
- raise exception.NotSupported(feature='Keystone Trusts')
diff --git a/contrib/rackspace/heat_keystoneclient_v2/tests/__init__.py b/contrib/rackspace/heat_keystoneclient_v2/tests/__init__.py
deleted file mode 100644
index b1967c8f3..000000000
--- a/contrib/rackspace/heat_keystoneclient_v2/tests/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import sys
-
-from mox3 import mox
-
-sys.modules['mox'] = mox
diff --git a/contrib/rackspace/heat_keystoneclient_v2/tests/test_client.py b/contrib/rackspace/heat_keystoneclient_v2/tests/test_client.py
deleted file mode 100644
index f66438ec1..000000000
--- a/contrib/rackspace/heat_keystoneclient_v2/tests/test_client.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import mock
-import mox
-from oslo_config import cfg
-from oslo_utils import importutils
-
-from heat.common import exception
-from heat.tests import common
-from heat.tests import utils
-
-from .. import client as heat_keystoneclient # noqa
-
-
-class KeystoneClientTest(common.HeatTestCase):
- """Test cases for heat.common.heat_keystoneclient."""
-
- def setUp(self):
- super(KeystoneClientTest, self).setUp()
- self.ctx = utils.dummy_context()
-
- # Import auth_token to have keystone_authtoken settings setup.
- importutils.import_module('keystonemiddleware.auth_token')
-
- dummy_url = 'http://server.test:5000/v2.0'
- cfg.CONF.set_override('auth_uri', dummy_url,
- group='keystone_authtoken')
- cfg.CONF.set_override('admin_user', 'heat',
- group='keystone_authtoken')
- cfg.CONF.set_override('admin_password', 'verybadpass',
- group='keystone_authtoken')
- cfg.CONF.set_override('admin_tenant_name', 'service',
- group='keystone_authtoken')
- self.addCleanup(self.m.VerifyAll)
-
- def _stubs_v2(self, method='token', auth_ok=True, trust_scoped=True,
- user_id='trustor_user_id', region=None):
- self.mock_ks_client = self.m.CreateMock(heat_keystoneclient.kc.Client)
- self.m.StubOutWithMock(heat_keystoneclient.kc, "Client")
- if method == 'token':
- heat_keystoneclient.kc.Client(
- auth_url=mox.IgnoreArg(),
- endpoint=mox.IgnoreArg(),
- tenant_name='test_tenant',
- token='abcd1234',
- cacert=None,
- cert=None,
- insecure=False,
- region_name=region,
- key=None).AndReturn(self.mock_ks_client)
- self.mock_ks_client.authenticate().AndReturn(auth_ok)
- elif method == 'password':
- heat_keystoneclient.kc.Client(
- auth_url=mox.IgnoreArg(),
- endpoint=mox.IgnoreArg(),
- tenant_name='test_tenant',
- tenant_id='test_tenant_id',
- username='test_username',
- password='password',
- cacert=None,
- cert=None,
- insecure=False,
- region_name=region,
- key=None).AndReturn(self.mock_ks_client)
- self.mock_ks_client.authenticate().AndReturn(auth_ok)
- if method == 'trust':
- heat_keystoneclient.kc.Client(
- auth_url='http://server.test:5000/v2.0',
- endpoint='http://server.test:5000/v2.0',
- password='verybadpass',
- tenant_name='service',
- username='heat',
- cacert=None,
- cert=None,
- insecure=False,
- region_name=region,
- key=None).AndReturn(self.mock_ks_client)
- self.mock_ks_client.authenticate(trust_id='atrust123',
- tenant_id='test_tenant_id'
- ).AndReturn(auth_ok)
- self.mock_ks_client.auth_ref = self.m.CreateMockAnything()
- self.mock_ks_client.auth_ref.trust_scoped = trust_scoped
- self.mock_ks_client.auth_ref.auth_token = 'atrusttoken'
- self.mock_ks_client.auth_ref.user_id = user_id
-
- def test_username_length(self):
- """Test that user names >64 characters are properly truncated."""
-
- self._stubs_v2()
-
- # a >64 character user name and the expected version
- long_user_name = 'U' * 64 + 'S'
- good_user_name = long_user_name[-64:]
- # mock keystone client user functions
- self.mock_ks_client.users = self.m.CreateMockAnything()
- mock_user = self.m.CreateMockAnything()
- # when keystone is called, the name should have been truncated
- # to the last 64 characters of the long name
- (self.mock_ks_client.users.create(good_user_name, 'password',
- mox.IgnoreArg(), enabled=True,
- tenant_id=mox.IgnoreArg())
- .AndReturn(mock_user))
- # mock out the call to roles; will send an error log message but does
- # not raise an exception
- self.mock_ks_client.roles = self.m.CreateMockAnything()
- self.mock_ks_client.roles.list().AndReturn([])
- self.m.ReplayAll()
- # call create_stack_user with a long user name.
- # the cleanup VerifyAll should verify that though we passed
- # long_user_name, keystone was actually called with a truncated
- # user name
- self.ctx.trust_id = None
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- heat_ks_client.create_stack_user(long_user_name, password='password')
-
- def test_init_v2_password(self):
- """Test creating the client, user/password context."""
-
- self._stubs_v2(method='password')
- self.m.ReplayAll()
-
- self.ctx.auth_token = None
- self.ctx.trust_id = None
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertIsNotNone(heat_ks_client.client)
-
- def test_init_v2_bad_nocreds(self):
- """Test creating the client without trusts, no credentials."""
-
- self.ctx.auth_token = None
- self.ctx.username = None
- self.ctx.password = None
- self.ctx.trust_id = None
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertRaises(exception.AuthorizationFailure,
- heat_ks_client._v2_client_init)
-
- def test_trust_init(self):
- """Test consuming a trust when initializing."""
-
- self._stubs_v2(method='trust')
- self.m.ReplayAll()
-
- self.ctx.username = None
- self.ctx.password = None
- self.ctx.auth_token = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- client = heat_ks_client.client
- self.assertIsNotNone(client)
-
- def test_trust_init_fail(self):
- """Test consuming a trust when initializing, error scoping."""
-
- self._stubs_v2(method='trust', trust_scoped=False)
- self.m.ReplayAll()
-
- self.ctx.username = None
- self.ctx.password = None
- self.ctx.auth_token = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- self.assertRaises(exception.AuthorizationFailure,
- heat_keystoneclient.KeystoneClientV2, self.ctx)
-
- def test_trust_init_fail_impersonation(self):
- """Test consuming a trust when initializing, impersonation error."""
-
- self._stubs_v2(method='trust', user_id='wrong_user_id')
- self.m.ReplayAll()
-
- self.ctx.username = 'heat'
- self.ctx.password = None
- self.ctx.auth_token = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- self.assertRaises(exception.AuthorizationFailure,
- heat_keystoneclient.KeystoneClientV2, self.ctx)
-
- def test_trust_init_pw(self):
- """Test trust_id is takes precedence username/password specified."""
-
- self._stubs_v2(method='trust')
- self.m.ReplayAll()
-
- self.ctx.auth_token = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertIsNotNone(heat_ks_client._client)
-
- def test_trust_init_token(self):
- """Test trust_id takes precedence when token specified."""
-
- self._stubs_v2(method='trust')
- self.m.ReplayAll()
-
- self.ctx.username = None
- self.ctx.password = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertIsNotNone(heat_ks_client._client)
-
- def test_region_name(self):
- """Test region_name is used when specified."""
-
- self._stubs_v2(method='trust', region='region123')
- self.m.ReplayAll()
-
- self.ctx.username = None
- self.ctx.password = None
- self.ctx.auth_token = None
- self.ctx.trust_id = 'atrust123'
- self.ctx.trustor_user_id = 'trustor_user_id'
- self.ctx.region_name = 'region123'
- heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.m.VerifyAll()
-
- # ##################### #
- # V3 Compatible Methods #
- # ##################### #
-
- def test_create_stack_domain_user_pass_through_to_create_stack_user(self):
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- mock_create_stack_user = mock.Mock()
- heat_ks_client.create_stack_user = mock_create_stack_user
- heat_ks_client.create_stack_domain_user('username', 'project_id',
- 'password')
- mock_create_stack_user.assert_called_once_with('username', 'password')
-
- def test_delete_stack_domain_user_pass_through_to_delete_stack_user(self):
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- mock_delete_stack_user = mock.Mock()
- heat_ks_client.delete_stack_user = mock_delete_stack_user
- heat_ks_client.delete_stack_domain_user('user_id', 'project_id')
- mock_delete_stack_user.assert_called_once_with('user_id')
-
- def test_create_stack_domain_project(self):
- tenant_id = self.ctx.tenant_id
- ks = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertEqual(tenant_id, ks.create_stack_domain_project('fakeid'))
-
- def test_delete_stack_domain_project(self):
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertIsNone(heat_ks_client.delete_stack_domain_project('fakeid'))
-
- # ###################### #
- # V3 Unsupported Methods #
- # ###################### #
-
- def test_create_trust_context(self):
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertRaises(exception.NotSupported,
- heat_ks_client.create_trust_context)
-
- def test_delete_trust(self):
- heat_ks_client = heat_keystoneclient.KeystoneClientV2(self.ctx)
- self.assertRaises(exception.NotSupported,
- heat_ks_client.delete_trust,
- 'fake_trust_id')
diff --git a/contrib/rackspace/rackspace/__init__.py b/contrib/rackspace/rackspace/__init__.py
deleted file mode 100644
index 3cc35306f..000000000
--- a/contrib/rackspace/rackspace/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Contributed Rackspace-specific resources."""
diff --git a/contrib/rackspace/rackspace/clients.py b/contrib/rackspace/rackspace/clients.py
deleted file mode 100644
index ffc745982..000000000
--- a/contrib/rackspace/rackspace/clients.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Client Libraries for Rackspace Resources."""
-
-import hashlib
-import random
-import time
-
-from glanceclient import client as gc
-from oslo_config import cfg
-from oslo_log import log as logging
-from six.moves.urllib import parse
-from swiftclient import utils as swiftclient_utils
-from troveclient import client as tc
-
-from heat.common import exception
-from heat.engine.clients import client_plugin
-from heat.engine.clients.os import cinder
-from heat.engine.clients.os import glance
-from heat.engine.clients.os import nova
-from heat.engine.clients.os import swift
-from heat.engine.clients.os import trove
-
-
-LOG = logging.getLogger(__name__)
-
-try:
- import pyrax
-except ImportError:
- pyrax = None
-
-
-class RackspaceClientPlugin(client_plugin.ClientPlugin):
-
- pyrax = None
-
- def _get_client(self, name):
- if self.pyrax is None:
- self._authenticate()
- return self.pyrax.get_client(
- name, cfg.CONF.region_name_for_services)
-
- def _authenticate(self):
- """Create an authenticated client context."""
- self.pyrax = pyrax.create_context("rackspace")
- self.pyrax.auth_endpoint = self.context.auth_url
- LOG.info("Authenticating username: %s",
- self.context.username)
- tenant = self.context.tenant_id
- tenant_name = self.context.tenant
- self.pyrax.auth_with_token(self.context.auth_token,
- tenant_id=tenant,
- tenant_name=tenant_name)
- if not self.pyrax.authenticated:
- LOG.warning("Pyrax Authentication Failed.")
- raise exception.AuthorizationFailure()
- LOG.info("User %s authenticated successfully.",
- self.context.username)
-
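As a rough usage sketch of the plugin machinery above (hypothetical; assumes pyrax is installed and `context` is an authenticated Heat request context), a concrete subclass such as RackspaceAutoScaleClient below resolves its regional client lazily on first access:

    # Illustrative only -- not part of the original module.
    plugin = RackspaceAutoScaleClient(context)
    client = plugin._get_client("autoscale")  # triggers _authenticate() once
    # Later calls reuse the cached, token-authenticated pyrax context.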
-
-class RackspaceAutoScaleClient(RackspaceClientPlugin):
-
- def _create(self):
- """Rackspace Auto Scale client."""
- return self._get_client("autoscale")
-
-
-class RackspaceCloudLBClient(RackspaceClientPlugin):
-
- def _create(self):
- """Rackspace cloud loadbalancer client."""
- return self._get_client("load_balancer")
-
-
-class RackspaceCloudDNSClient(RackspaceClientPlugin):
-
- def _create(self):
- """Rackspace Cloud DNS client."""
- return self._get_client("dns")
-
-
-class RackspaceNovaClient(nova.NovaClientPlugin,
- RackspaceClientPlugin):
-
- def _create(self):
- """Rackspace cloudservers client."""
- client = self._get_client("compute")
- if not client:
- client = super(RackspaceNovaClient, self)._create()
- return client
-
-
-class RackspaceCloudNetworksClient(RackspaceClientPlugin):
-
- def _create(self):
- """Rackspace cloud networks client.
-
- Though pyrax "fixed" the network client bugs introduced in 1.8, it
- still does not work with contexts because it caches the nova client.
- """
- if not self.pyrax:
- self._authenticate()
- # Special handling is needed because the contextual pyrax
- # does not cope with "networks" being absent from the catalog.
- ep = pyrax._get_service_endpoint(
- self.pyrax, "compute", region=cfg.CONF.region_name_for_services)
- cls = pyrax._client_classes['compute:network']
- client = cls(self.pyrax,
- region_name=cfg.CONF.region_name_for_services,
- management_url=ep)
- return client
-
-
-class RackspaceTroveClient(trove.TroveClientPlugin):
- """Rackspace trove client.
-
- Since the pyrax module uses its own client implementation for Cloud
- Databases, we have to skip pyrax on this one and override the super
- implementation to account for custom service type and regionalized
- management url.
- """
-
- def _create(self):
- service_type = "rax:database"
- con = self.context
- endpoint_type = self._get_client_option('trove', 'endpoint_type')
- args = {
- 'service_type': service_type,
- 'auth_url': con.auth_url,
- 'proxy_token': con.auth_token,
- 'username': None,
- 'password': None,
- 'cacert': self._get_client_option('trove', 'ca_file'),
- 'insecure': self._get_client_option('trove', 'insecure'),
- 'endpoint_type': endpoint_type
- }
-
- client = tc.Client('1.0', **args)
- region = cfg.CONF.region_name_for_services
- management_url = self.url_for(service_type=service_type,
- endpoint_type=endpoint_type,
- region_name=region)
- client.client.auth_token = con.auth_token
- client.client.management_url = management_url
-
- return client
-
-
-class RackspaceCinderClient(cinder.CinderClientPlugin):
-
- def _create(self):
- """Override the region for the cinder client."""
- client = super(RackspaceCinderClient, self)._create()
- management_url = self.url_for(
- service_type='volume',
- region_name=cfg.CONF.region_name_for_services)
- client.client.management_url = management_url
- return client
-
-
-class RackspaceSwiftClient(swift.SwiftClientPlugin):
-
- def is_valid_temp_url_path(self, path):
- """Return True if path is a valid Swift TempURL path, False otherwise.
-
- A Swift TempURL path must:
- - Be five parts, ['', 'v1', 'account', 'container', 'object']
- - Be a v1 request
- - Have account, container, and object values
- - Have an object value with more than just '/'s
-
- :param path: The TempURL path
- :type path: string
- """
- parts = path.split('/', 4)
- return bool(len(parts) == 5 and
- not parts[0] and
- parts[1] == 'v1' and
- parts[2] and
- parts[3] and
- parts[4].strip('/'))
-
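To make the five-part rule concrete, a few illustrative inputs and the results the method would return:

    plugin.is_valid_temp_url_path('/v1/AUTH_acct/cont/obj')  # True
    plugin.is_valid_temp_url_path('/v1/AUTH_acct/cont/')     # False: object is only '/'
    plugin.is_valid_temp_url_path('/v2/AUTH_acct/cont/obj')  # False: not a v1 request
    plugin.is_valid_temp_url_path('/v1/AUTH_acct/cont')      # False: only four parts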
- def get_temp_url(self, container_name, obj_name, timeout=None,
- method='PUT'):
- """Return a Swift TempURL."""
- def tenant_uuid():
- access = self.context.auth_token_info['access']
- for role in access['user']['roles']:
- if role['name'] == 'object-store:default':
- return role['tenantId']
-
- key_header = 'x-account-meta-temp-url-key'
- if key_header in self.client().head_account():
- key = self.client().head_account()[key_header]
- else:
- # Encode before hashing so this also works on Python 3,
- # where hashlib requires bytes.
- key = hashlib.sha224(
- str(random.getrandbits(256)).encode('utf-8')).hexdigest()[:32]
- self.client().post_account({key_header: key})
-
- path = '/v1/%s/%s/%s' % (tenant_uuid(), container_name, obj_name)
- if timeout is None:
- timeout = swift.MAX_EPOCH - 60 - time.time()
- tempurl = swiftclient_utils.generate_temp_url(path, timeout, key,
- method)
- sw_url = parse.urlparse(self.client().url)
- return '%s://%s%s' % (sw_url.scheme, sw_url.netloc, tempurl)
-
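A hypothetical call to the method above (assumes a plugin instance, with the account-key handling just shown applying behind the scenes):

    # Generate a one-hour GET TempURL for an object; names are illustrative.
    url = plugin.get_temp_url('backups', 'db.tar.gz',
                              timeout=3600, method='GET')
    # -> 'https://<swift-host>/v1/<tenant>/backups/db.tar.gz?temp_url_sig=...'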
-
-class RackspaceGlanceClient(glance.GlanceClientPlugin):
-
- def _create(self, version=None):
- con = self.context
- endpoint_type = self._get_client_option('glance', 'endpoint_type')
- endpoint = self.url_for(
- service_type='image',
- endpoint_type=endpoint_type,
- region_name=cfg.CONF.region_name_for_services)
- # Rackspace service catalog includes a tenant scoped glance
- # endpoint so we have to munge the url a bit
- glance_url = parse.urlparse(endpoint)
- # strip the tenant path (and everything after it) from the URL
- endpoint = "%s://%s" % (glance_url.scheme, glance_url.hostname)
- args = {
- 'auth_url': con.auth_url,
- 'service_type': 'image',
- 'project_id': con.tenant,
- 'token': self.auth_token,
- 'endpoint_type': endpoint_type,
- 'ca_file': self._get_client_option('glance', 'ca_file'),
- 'cert_file': self._get_client_option('glance', 'cert_file'),
- 'key_file': self._get_client_option('glance', 'key_file'),
- 'insecure': self._get_client_option('glance', 'insecure')
- }
- return gc.Client('2', endpoint, **args)
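The URL munging above, in isolation (illustrative endpoint; note that urlparse's hostname attribute also drops any port, not just the tenant path):

    from six.moves.urllib import parse
    endpoint = 'https://dfw.images.api.rackspacecloud.com/v2/900001'
    u = parse.urlparse(endpoint)
    print('%s://%s' % (u.scheme, u.hostname))
    # https://dfw.images.api.rackspacecloud.com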
diff --git a/contrib/rackspace/rackspace/resources/auto_scale.py b/contrib/rackspace/rackspace/resources/auto_scale.py
deleted file mode 100644
index 57f73a40d..000000000
--- a/contrib/rackspace/rackspace/resources/auto_scale.py
+++ /dev/null
@@ -1,789 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Resources for Rackspace Auto Scale."""
-
-import copy
-import six
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.common import template_format
-from heat.engine import attributes
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
-from heat.engine import support
-from heat.engine import template as templatem
-
-try:
- from pyrax.exceptions import Forbidden
- from pyrax.exceptions import NotFound
- PYRAX_INSTALLED = True
-except ImportError:
- class Forbidden(Exception):
- """Dummy pyrax exception - only used for testing."""
-
- class NotFound(Exception):
- """Dummy pyrax exception - only used for testing."""
-
- PYRAX_INSTALLED = False
-
-
-class Group(resource.Resource):
- """Represents a scaling group."""
-
- # pyrax differs drastically from the actual Auto Scale API. We'll prefer
- # the true API here, but since pyrax doesn't support the full flexibility
- # of the API, we'll have to restrict what users can provide.
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- # properties are identical to the API POST /groups.
- PROPERTIES = (
- GROUP_CONFIGURATION, LAUNCH_CONFIGURATION,
- ) = (
- 'groupConfiguration', 'launchConfiguration',
- )
-
- _GROUP_CONFIGURATION_KEYS = (
- GROUP_CONFIGURATION_MAX_ENTITIES, GROUP_CONFIGURATION_COOLDOWN,
- GROUP_CONFIGURATION_NAME, GROUP_CONFIGURATION_MIN_ENTITIES,
- GROUP_CONFIGURATION_METADATA,
- ) = (
- 'maxEntities', 'cooldown',
- 'name', 'minEntities',
- 'metadata',
- )
-
- _LAUNCH_CONFIG_KEYS = (
- LAUNCH_CONFIG_ARGS, LAUNCH_CONFIG_TYPE,
- ) = (
- 'args', 'type',
- )
-
- _LAUNCH_CONFIG_ARGS_KEYS = (
- LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
- LAUNCH_CONFIG_ARGS_SERVER,
- LAUNCH_CONFIG_ARGS_STACK,
- ) = (
- 'loadBalancers',
- 'server',
- 'stack',
- )
-
- _LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
- LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID,
- LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT,
- ) = (
- 'loadBalancerId',
- 'port',
- )
-
- _LAUNCH_CONFIG_ARGS_SERVER_KEYS = (
- LAUNCH_CONFIG_ARGS_SERVER_NAME, LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF,
- LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF,
- LAUNCH_CONFIG_ARGS_SERVER_METADATA,
- LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY,
- LAUNCH_CONFIG_ARGS_SERVER_NETWORKS,
- LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG,
- LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME,
- LAUNCH_CONFIG_ARGS_SERVER_USER_DATA,
- LAUNCH_CONFIG_ARGS_SERVER_CDRIVE
- ) = (
- 'name', 'flavorRef',
- 'imageRef',
- 'metadata',
- 'personality',
- 'networks',
- 'diskConfig', # technically maps to OS-DCF:diskConfig
- 'key_name',
- 'user_data',
- 'config_drive'
- )
-
- _LAUNCH_CONFIG_ARGS_SERVER_NETWORK_KEYS = (
- LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID,
- ) = (
- 'uuid',
- )
-
- _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
- LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
- LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
- LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
- LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT,
- LAUNCH_CONFIG_ARGS_STACK_FILES,
- LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
- LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS
- ) = (
- 'template',
- 'template_url',
- 'disable_rollback',
- 'environment',
- 'files',
- 'parameters',
- 'timeout_mins'
- )
-
- _launch_configuration_args_schema = {
- LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
- properties.Schema.LIST,
- _('List of load balancers to hook the '
- 'server up to. If not specified, no '
- 'load balancing will be configured.'),
- default=[],
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID: properties.Schema(
- properties.Schema.STRING,
- _('ID of the load balancer.'),
- required=True
- ),
- LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
- properties.Schema.INTEGER,
- _('Server port to connect the load balancer to.')
- ),
- },
- )
- ),
- LAUNCH_CONFIG_ARGS_SERVER: properties.Schema(
- properties.Schema.MAP,
- _('Server creation arguments, as accepted by the Cloud Servers '
- 'server creation API.'),
- required=False,
- schema={
- LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
- properties.Schema.STRING,
- _('Server name.'),
- required=True
- ),
- LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF: properties.Schema(
- properties.Schema.STRING,
- _('The ID or name of the flavor to boot onto.'),
- constraints=[
- constraints.CustomConstraint('nova.flavor')
- ],
- required=True
- ),
- LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF: properties.Schema(
- properties.Schema.STRING,
- _('The ID or name of the image to boot with.'),
- constraints=[
- constraints.CustomConstraint('glance.image')
- ],
- required=True
- ),
- LAUNCH_CONFIG_ARGS_SERVER_METADATA: properties.Schema(
- properties.Schema.MAP,
- _('Metadata key and value pairs.')
- ),
- LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY: properties.Schema(
- properties.Schema.MAP,
- _('File path and contents.')
- ),
- LAUNCH_CONFIG_ARGS_SERVER_CDRIVE: properties.Schema(
- properties.Schema.BOOLEAN,
- _('Enable config drive on the instance.')
- ),
- LAUNCH_CONFIG_ARGS_SERVER_USER_DATA: properties.Schema(
- properties.Schema.STRING,
- _('User data for bootstrapping the instance.')
- ),
- LAUNCH_CONFIG_ARGS_SERVER_NETWORKS: properties.Schema(
- properties.Schema.LIST,
- _('Networks to attach to. If unspecified, the instance '
- 'will be attached to the public Internet and private '
- 'ServiceNet networks.'),
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- LAUNCH_CONFIG_ARGS_SERVER_NETWORK_UUID:
- properties.Schema(
- properties.Schema.STRING,
- _('UUID of network to attach to.'),
- required=True)
- }
- )
- ),
- LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG: properties.Schema(
- properties.Schema.STRING,
- _('Configuration specifying the partition layout. Use AUTO '
- 'to create a single partition spanning the entire disk, or '
- 'MANUAL to create a partition matching the source image.'),
- constraints=[
- constraints.AllowedValues(['AUTO', 'MANUAL']),
- ]
- ),
- LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME: properties.Schema(
- properties.Schema.STRING,
- _('Name of a previously created SSH keypair to allow '
- 'key-based authentication to the server.')
- ),
- },
- ),
- LAUNCH_CONFIG_ARGS_STACK: properties.Schema(
- properties.Schema.MAP,
- _('The attributes that Auto Scale uses to create a new stack. The '
- 'attributes that you specify for the stack entity apply to all '
- 'new stacks in the scaling group. Note that the stack arguments '
- 'are passed directly to Heat when creating a stack.'),
- schema={
- LAUNCH_CONFIG_ARGS_STACK_TEMPLATE: properties.Schema(
- properties.Schema.STRING,
- _('The template that describes the stack. Either the '
- 'template or template_url property must be specified.'),
- ),
- LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL: properties.Schema(
- properties.Schema.STRING,
- _('A URI to a template. Either the template or '
- 'template_url property must be specified.')
- ),
- LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK: properties.Schema(
- properties.Schema.BOOLEAN,
- _('Keep the resources that have been created if the stack '
- 'fails to create. Defaults to True.'),
- default=True
- ),
- LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT: properties.Schema(
- properties.Schema.MAP,
- _('The environment for the stack.'),
- ),
- LAUNCH_CONFIG_ARGS_STACK_FILES: properties.Schema(
- properties.Schema.MAP,
- _('The contents of files that the template references.')
- ),
- LAUNCH_CONFIG_ARGS_STACK_PARAMETERS: properties.Schema(
- properties.Schema.MAP,
- _('Key/value pairs of the parameters and their values to '
- 'pass to the parameters in the template.')
- ),
- LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS: properties.Schema(
- properties.Schema.INTEGER,
- _('The stack creation timeout in minutes.')
- )
- }
- )
- }
-
- properties_schema = {
- GROUP_CONFIGURATION: properties.Schema(
- properties.Schema.MAP,
- _('Group configuration.'),
- schema={
- GROUP_CONFIGURATION_MAX_ENTITIES: properties.Schema(
- properties.Schema.INTEGER,
- _('Maximum number of entities in this scaling group.'),
- required=True
- ),
- GROUP_CONFIGURATION_COOLDOWN: properties.Schema(
- properties.Schema.NUMBER,
- _('Number of seconds after capacity changes during '
- 'which further capacity changes are disabled.'),
- required=True
- ),
- GROUP_CONFIGURATION_NAME: properties.Schema(
- properties.Schema.STRING,
- _('Name of the scaling group.'),
- required=True
- ),
- GROUP_CONFIGURATION_MIN_ENTITIES: properties.Schema(
- properties.Schema.INTEGER,
- _('Minimum number of entities in this scaling group.'),
- required=True
- ),
- GROUP_CONFIGURATION_METADATA: properties.Schema(
- properties.Schema.MAP,
- _('Arbitrary key/value metadata to associate with '
- 'this group.')
- ),
- },
- required=True,
- update_allowed=True
- ),
- LAUNCH_CONFIGURATION: properties.Schema(
- properties.Schema.MAP,
- _('Launch configuration.'),
- schema={
- LAUNCH_CONFIG_ARGS: properties.Schema(
- properties.Schema.MAP,
- _('Type-specific launch arguments.'),
- schema=_launch_configuration_args_schema,
- required=True
- ),
- LAUNCH_CONFIG_TYPE: properties.Schema(
- properties.Schema.STRING,
- _('Launch configuration method. Only launch_server and '
- 'launch_stack are currently supported.'),
- required=True,
- constraints=[
- constraints.AllowedValues(['launch_server',
- 'launch_stack']),
- ]
- ),
- },
- required=True,
- update_allowed=True
- ),
- # We don't allow scaling policies to be specified here, despite the
- # fact that the API supports it. Users should use the ScalingPolicy
- # resource.
- }
-
- def _get_group_config_args(self, groupconf):
- """Get the groupConfiguration-related pyrax arguments."""
- return dict(
- name=groupconf[self.GROUP_CONFIGURATION_NAME],
- cooldown=groupconf[self.GROUP_CONFIGURATION_COOLDOWN],
- min_entities=groupconf[self.GROUP_CONFIGURATION_MIN_ENTITIES],
- max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
- metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))
-
- def _get_launch_config_server_args(self, launchconf):
- lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
- server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
- lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
- lbs = copy.deepcopy(lb_args)
- for lb in lbs:
- # If the port is not specified, the load balancer ID must
- # refer to a RackConnectV3 load balancer pool.
- if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
- del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
- continue
- lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
- lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
- personality = server_args.get(
- self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
- if personality:
- personality = [{'path': k, 'contents': v} for k, v in
- personality.items()]
- user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
- cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) or
- bool(user_data is not None and len(user_data.strip())))
- image_id = self.client_plugin('glance').find_image_by_name_or_id(
- server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
- flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
- server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])
-
- return dict(
- launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
- server_name=server_args[self.LAUNCH_CONFIG_ARGS_SERVER_NAME],
- image=image_id,
- flavor=flavor_id,
- disk_config=server_args.get(
- self.LAUNCH_CONFIG_ARGS_SERVER_DISK_CONFIG),
- metadata=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_METADATA),
- config_drive=cdrive,
- user_data=user_data,
- personality=personality,
- networks=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_NETWORKS),
- load_balancers=lbs,
- key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
- )
-
- def _get_launch_config_stack_args(self, launchconf):
- lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
- stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
- return dict(
- launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
- template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
- template_url=stack_args[
- self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
- disable_rollback=stack_args[
- self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
- environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
- files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
- parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
- timeout_mins=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS]
- )
-
- def _get_launch_config_args(self, launchconf):
- """Get the launchConfiguration-related pyrax arguments."""
- if launchconf[self.LAUNCH_CONFIG_ARGS].get(
- self.LAUNCH_CONFIG_ARGS_SERVER):
- return self._get_launch_config_server_args(launchconf)
- else:
- return self._get_launch_config_stack_args(launchconf)
-
- def _get_create_args(self):
- """Get pyrax-style arguments for creating a scaling group."""
- args = self._get_group_config_args(
- self.properties[self.GROUP_CONFIGURATION])
- args['group_metadata'] = args.pop('metadata')
- args.update(self._get_launch_config_args(
- self.properties[self.LAUNCH_CONFIGURATION]))
- return args
-
- def handle_create(self):
- """Create the autoscaling group and set resource_id.
-
- The resource_id is set to the resulting group's ID.
- """
- asclient = self.auto_scale()
- group = asclient.create(**self._get_create_args())
- self.resource_id_set(str(group.id))
-
- def handle_check(self):
- self.auto_scale().get(self.resource_id)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- """Update the group configuration and the launch configuration."""
- asclient = self.auto_scale()
- if self.GROUP_CONFIGURATION in prop_diff:
- args = self._get_group_config_args(
- prop_diff[self.GROUP_CONFIGURATION])
- asclient.replace(self.resource_id, **args)
- if self.LAUNCH_CONFIGURATION in prop_diff:
- args = self._get_launch_config_args(
- prop_diff[self.LAUNCH_CONFIGURATION])
- asclient.replace_launch_config(self.resource_id, **args)
-
- def handle_delete(self):
- """Delete the scaling group.
-
- Since Auto Scale doesn't allow deleting a group until all its servers
- are gone, we must set the group's minEntities and maxEntities to 0,
- then keep retrying the delete until Auto Scale has removed all the
- servers and the delete succeeds.
- """
- if self.resource_id is None:
- return
- asclient = self.auto_scale()
- args = self._get_group_config_args(
- self.properties[self.GROUP_CONFIGURATION])
- args['min_entities'] = 0
- args['max_entities'] = 0
- try:
- asclient.replace(self.resource_id, **args)
- except NotFound:
- pass
-
- def check_delete_complete(self, result):
- """Try the delete operation until it succeeds."""
- if self.resource_id is None:
- return True
- try:
- self.auto_scale().delete(self.resource_id)
- except Forbidden:
- return False
- except NotFound:
- return True
- else:
- return True
-
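A rough model of the delete contract above, as Heat's engine drives it (sketch only; `group` stands for a Group resource instance and the interval is hypothetical):

    import time
    poll_interval = 5  # seconds, hypothetical
    # handle_delete() has already shrunk the group to 0 entities.
    # Forbidden -> servers still draining (False); NotFound/success -> done.
    while not group.check_delete_complete(None):
        time.sleep(poll_interval)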
- def _check_rackconnect_v3_pool_exists(self, pool_id):
- pools = self.client("rackconnect").list_load_balancer_pools()
- return pool_id in (p.id for p in pools)
-
- def validate(self):
- super(Group, self).validate()
- launchconf = self.properties[self.LAUNCH_CONFIGURATION]
- lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
-
- server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
- st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)
-
- # launch_server and launch_stack are required and mutually exclusive.
- if ((not server_args and not st_args) or
- (server_args and st_args)):
- msg = (_('Must provide one of %(server)s or %(stack)s in %(conf)s')
- % {'server': self.LAUNCH_CONFIG_ARGS_SERVER,
- 'stack': self.LAUNCH_CONFIG_ARGS_STACK,
- 'conf': self.LAUNCH_CONFIGURATION})
- raise exception.StackValidationFailed(msg)
-
- lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
- lbs = copy.deepcopy(lb_args)
- for lb in lbs:
- lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
- lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
- if not lb_port:
- # check if lb id is a valid RCV3 pool id
- if not self._check_rackconnect_v3_pool_exists(lb_id):
- msg = _('Could not find RackConnectV3 pool '
- 'with id %s') % lb_id
- raise exception.StackValidationFailed(msg)
-
- if st_args:
- st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
- st_tmpl_url = st_args.get(
- self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
- st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
- # template and template_url are required and mutually exclusive.
- if ((not st_tmpl and not st_tmpl_url) or
- (st_tmpl and st_tmpl_url)):
- msg = _('Must provide one of template or template_url.')
- raise exception.StackValidationFailed(msg)
-
- if st_tmpl:
- st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
- try:
- tmpl = template_format.simple_parse(st_tmpl)
- templatem.Template(tmpl, files=st_files, env=st_env)
- except Exception as exc:
- msg = (_('Encountered error while loading template: %s') %
- six.text_type(exc))
- raise exception.StackValidationFailed(msg)
-
- def auto_scale(self):
- return self.client('auto_scale')
-
-
-class ScalingPolicy(resource.Resource):
- """Represents a Rackspace Auto Scale scaling policy."""
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- PROPERTIES = (
- GROUP, NAME, CHANGE, CHANGE_PERCENT, DESIRED_CAPACITY,
- COOLDOWN, TYPE, ARGS,
- ) = (
- 'group', 'name', 'change', 'changePercent', 'desiredCapacity',
- 'cooldown', 'type', 'args',
- )
-
- properties_schema = {
- # group isn't in the post body, but it's in the URL to post to.
- GROUP: properties.Schema(
- properties.Schema.STRING,
- _('Scaling group ID that this policy belongs to.'),
- required=True
- ),
- NAME: properties.Schema(
- properties.Schema.STRING,
- _('Name of this scaling policy.'),
- required=True,
- update_allowed=True
- ),
- CHANGE: properties.Schema(
- properties.Schema.INTEGER,
- _('Amount to add to or remove from current number of instances. '
- 'Incompatible with changePercent and desiredCapacity.'),
- update_allowed=True
- ),
- CHANGE_PERCENT: properties.Schema(
- properties.Schema.NUMBER,
- _('Percentage-based change to add or remove from current number '
- 'of instances. Incompatible with change and desiredCapacity.'),
- update_allowed=True
- ),
- DESIRED_CAPACITY: properties.Schema(
- properties.Schema.INTEGER,
- _('Absolute number to set the number of instances to. '
- 'Incompatible with change and changePercent.'),
- update_allowed=True
- ),
- COOLDOWN: properties.Schema(
- properties.Schema.NUMBER,
- _('Number of seconds after a policy execution during which '
- 'further executions are disabled.'),
- update_allowed=True
- ),
- TYPE: properties.Schema(
- properties.Schema.STRING,
- _('Type of this scaling policy. Specifies how the policy is '
- 'executed.'),
- required=True,
- constraints=[
- constraints.AllowedValues(['webhook', 'schedule',
- 'cloud_monitoring']),
- ],
- update_allowed=True
- ),
- ARGS: properties.Schema(
- properties.Schema.MAP,
- _('Type-specific arguments for the policy.'),
- update_allowed=True
- ),
- }
-
- def _get_args(self, properties):
- """Get pyrax-style create arguments for scaling policies."""
- args = dict(
- scaling_group=properties[self.GROUP],
- name=properties[self.NAME],
- policy_type=properties[self.TYPE],
- cooldown=properties[self.COOLDOWN],
- )
- if properties.get(self.CHANGE) is not None:
- args['change'] = properties[self.CHANGE]
- elif properties.get(self.CHANGE_PERCENT) is not None:
- args['change'] = properties[self.CHANGE_PERCENT]
- args['is_percent'] = True
- elif properties.get(self.DESIRED_CAPACITY) is not None:
- args['desired_capacity'] = properties[self.DESIRED_CAPACITY]
- if properties.get(self.ARGS) is not None:
- args['args'] = properties[self.ARGS]
- return args
-
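For example, a percentage-based policy maps through _get_args like this (illustrative values):

    props = {'group': 'gid', 'name': 'scale-up', 'type': 'webhook',
             'cooldown': 300, 'change': None, 'changePercent': 10,
             'desiredCapacity': None, 'args': None}
    # _get_args(props) ->
    # {'scaling_group': 'gid', 'name': 'scale-up', 'policy_type': 'webhook',
    #  'cooldown': 300, 'change': 10, 'is_percent': True}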
- def handle_create(self):
- """Create the scaling policy and initialize the resource ID.
-
- The resource ID is initialized to {group_id}:{policy_id}.
- """
- asclient = self.auto_scale()
- args = self._get_args(self.properties)
- policy = asclient.add_policy(**args)
- resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)
- self.resource_id_set(resource_id)
-
- def _get_policy_id(self):
- return self.resource_id.split(':', 1)[1]
-
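The composite resource ID keeps both halves recoverable (illustrative values):

    resource_id = '%s:%s' % ('group-uuid', 'policy-uuid')
    resource_id.split(':', 1)[1]  # 'policy-uuid'
    # maxsplit=1 preserves any ':' that appears inside the policy ID.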
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- asclient = self.auto_scale()
- props = json_snippet.properties(self.properties_schema,
- self.context)
- args = self._get_args(props)
- args['policy'] = self._get_policy_id()
- asclient.replace_policy(**args)
-
- def handle_delete(self):
- """Delete the policy if it exists."""
- asclient = self.auto_scale()
- if self.resource_id is None:
- return
- policy_id = self._get_policy_id()
- try:
- asclient.delete_policy(self.properties[self.GROUP], policy_id)
- except NotFound:
- pass
-
- def auto_scale(self):
- return self.client('auto_scale')
-
-
-class WebHook(resource.Resource):
- """Represents a Rackspace AutoScale webhook.
-
- Exposes the URLs of the webhook as attributes.
- """
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- PROPERTIES = (
- POLICY, NAME, METADATA,
- ) = (
- 'policy', 'name', 'metadata',
- )
-
- ATTRIBUTES = (
- EXECUTE_URL, CAPABILITY_URL,
- ) = (
- 'executeUrl', 'capabilityUrl',
- )
-
- properties_schema = {
- POLICY: properties.Schema(
- properties.Schema.STRING,
- _('The policy that this webhook should apply to, in '
- '{group_id}:{policy_id} format. Generally a Ref to a Policy '
- 'resource.'),
- required=True
- ),
- NAME: properties.Schema(
- properties.Schema.STRING,
- _('The name of this webhook.'),
- required=True,
- update_allowed=True
- ),
- METADATA: properties.Schema(
- properties.Schema.MAP,
- _('Arbitrary key/value metadata for this webhook.'),
- update_allowed=True
- ),
- }
-
- attributes_schema = {
- EXECUTE_URL: attributes.Schema(
- _("The URL for executing the webhook (requires auth)."),
- cache_mode=attributes.Schema.CACHE_NONE
- ),
- CAPABILITY_URL: attributes.Schema(
- _("The URL for executing the webhook (does not require auth)."),
- cache_mode=attributes.Schema.CACHE_NONE
- ),
- }
-
- def _get_args(self, props):
- group_id, policy_id = props[self.POLICY].split(':', 1)
- return dict(
- name=props[self.NAME],
- scaling_group=group_id,
- policy=policy_id,
- metadata=props.get(self.METADATA))
-
- def handle_create(self):
- asclient = self.auto_scale()
- args = self._get_args(self.properties)
- webhook = asclient.add_webhook(**args)
- self.resource_id_set(webhook.id)
-
- for link in webhook.links:
- rel_to_key = {'self': 'executeUrl',
- 'capability': 'capabilityUrl'}
- key = rel_to_key.get(link['rel'])
- if key is not None:
- url = link['href'].encode('utf-8')
- self.data_set(key, url)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- asclient = self.auto_scale()
- args = self._get_args(json_snippet.properties(self.properties_schema,
- self.context))
- args['webhook'] = self.resource_id
- asclient.replace_webhook(**args)
-
- def _resolve_attribute(self, key):
- v = self.data().get(key)
- if v is not None:
- return v.decode('utf-8')
- else:
- return None
-
- def handle_delete(self):
- if self.resource_id is None:
- return
- asclient = self.auto_scale()
- group_id, policy_id = self.properties[self.POLICY].split(':', 1)
- try:
- asclient.delete_webhook(group_id, policy_id, self.resource_id)
- except NotFound:
- pass
-
- def auto_scale(self):
- return self.client('auto_scale')
-
-
-def resource_mapping():
- return {
- 'Rackspace::AutoScale::Group': Group,
- 'Rackspace::AutoScale::ScalingPolicy': ScalingPolicy,
- 'Rackspace::AutoScale::WebHook': WebHook
- }
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/resources/cloud_dns.py b/contrib/rackspace/rackspace/resources/cloud_dns.py
deleted file mode 100644
index a577401e2..000000000
--- a/contrib/rackspace/rackspace/resources/cloud_dns.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Resources for Rackspace DNS."""
-
-from oslo_log import log as logging
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
-from heat.engine import support
-
-try:
- from pyrax.exceptions import NotFound
- PYRAX_INSTALLED = True
-except ImportError:
- # Set up a fake exception for testing without pyrax.
- class NotFound(Exception):
- pass
-
- PYRAX_INSTALLED = False
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudDns(resource.Resource):
- """Represents a DNS resource."""
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- PROPERTIES = (
- NAME, EMAIL_ADDRESS, TTL, COMMENT, RECORDS,
- ) = (
- 'name', 'emailAddress', 'ttl', 'comment', 'records',
- )
-
- _RECORD_KEYS = (
- RECORD_COMMENT, RECORD_NAME, RECORD_DATA, RECORD_PRIORITY, RECORD_TTL,
- RECORD_TYPE,
- ) = (
- 'comment', 'name', 'data', 'priority', 'ttl',
- 'type',
- )
-
- properties_schema = {
- NAME: properties.Schema(
- properties.Schema.STRING,
- _('Specifies the name for the domain or subdomain. Must be a '
- 'valid domain name.'),
- required=True,
- constraints=[
- constraints.Length(min=3),
- ]
- ),
- EMAIL_ADDRESS: properties.Schema(
- properties.Schema.STRING,
- _('Email address to use for contacting the domain administrator.'),
- required=True,
- update_allowed=True
- ),
- TTL: properties.Schema(
- properties.Schema.INTEGER,
- _('How long other servers should cache record data.'),
- default=3600,
- constraints=[
- constraints.Range(min=300),
- ],
- update_allowed=True
- ),
- COMMENT: properties.Schema(
- properties.Schema.STRING,
- _('Optional free-form text comment.'),
- constraints=[
- constraints.Length(max=160),
- ],
- update_allowed=True
- ),
- RECORDS: properties.Schema(
- properties.Schema.LIST,
- _('Domain records.'),
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- RECORD_COMMENT: properties.Schema(
- properties.Schema.STRING,
- _('Optional free-form text comment.'),
- constraints=[
- constraints.Length(max=160),
- ]
- ),
- RECORD_NAME: properties.Schema(
- properties.Schema.STRING,
- _('Specifies the name for the domain or '
- 'subdomain. Must be a valid domain name.'),
- required=True,
- constraints=[
- constraints.Length(min=3),
- ]
- ),
- RECORD_DATA: properties.Schema(
- properties.Schema.STRING,
- _('Type-specific record data.'),
- required=True
- ),
- RECORD_PRIORITY: properties.Schema(
- properties.Schema.INTEGER,
- _('Required for MX and SRV records, but '
- 'forbidden for other record types. If '
- 'specified, must be an integer from 0 to '
- '65535.'),
- constraints=[
- constraints.Range(0, 65535),
- ]
- ),
- RECORD_TTL: properties.Schema(
- properties.Schema.INTEGER,
- _('How long other servers should cache '
- 'record data.'),
- default=3600,
- constraints=[
- constraints.Range(min=300),
- ]
- ),
- RECORD_TYPE: properties.Schema(
- properties.Schema.STRING,
- _('Specifies the record type.'),
- required=True,
- constraints=[
- constraints.AllowedValues(['A', 'AAAA', 'NS',
- 'MX', 'CNAME',
- 'TXT', 'SRV']),
- ]
- ),
- },
- ),
- update_allowed=True
- ),
- }
-
- def cloud_dns(self):
- return self.client('cloud_dns')
-
- def handle_create(self):
- """Create a Rackspace CloudDns Instance."""
- # There is no check_create_complete as the pyrax create for DNS is
- # synchronous.
- LOG.debug("CloudDns handle_create called.")
- args = dict(self.properties.items())
- for rec in args[self.RECORDS] or []:
- # only pop the priority for the correct types
- rec_type = rec[self.RECORD_TYPE]
- if (rec_type != 'MX') and (rec_type != 'SRV'):
- rec.pop(self.RECORD_PRIORITY, None)
- dom = self.cloud_dns().create(**args)
- self.resource_id_set(dom.id)
-
- def handle_check(self):
- self.cloud_dns().get(self.resource_id)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- """Update a Rackspace CloudDns Instance."""
- LOG.debug("CloudDns handle_update called.")
- if not self.resource_id:
- raise exception.Error(_('Update called on a non-existent domain'))
- if prop_diff:
- dom = self.cloud_dns().get(self.resource_id)
-
- # handle records separately
- records = prop_diff.pop(self.RECORDS, {})
-
- if prop_diff:
- # Handle top level domain properties
- dom.update(**prop_diff)
-
- # handle records
- if records:
- recs = dom.list_records()
- # 1. Delete all current records except the Rackspace NS records.
- for rec in recs:
- if rec.type != 'NS' or 'stabletransit.com' not in rec.data:
- rec.delete()
- # 2. Add the new records from prop_diff.
- dom.add_records(records)
-
- def handle_delete(self):
- """Delete a Rackspace CloudDns Instance."""
- LOG.debug("CloudDns handle_delete called.")
- if self.resource_id:
- try:
- dom = self.cloud_dns().get(self.resource_id)
- dom.delete()
- except NotFound:
- pass
-
-
-def resource_mapping():
- return {'Rackspace::Cloud::DNS': CloudDns}
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py b/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
deleted file mode 100644
index 1205defe9..000000000
--- a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
+++ /dev/null
@@ -1,1198 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import itertools
-
-from oslo_log import log as logging
-import six
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.engine import attributes
-from heat.engine import constraints
-from heat.engine import function
-from heat.engine import properties
-from heat.engine import resource
-from heat.engine import support
-
-try:
- from pyrax.exceptions import NotFound # noqa
- PYRAX_INSTALLED = True
-except ImportError:
- # Set up a fake exception for testing without pyrax.
- class NotFound(Exception):
- pass
- PYRAX_INSTALLED = False
-
-
-LOG = logging.getLogger(__name__)
-
-
-def lb_immutable(exc):
- return 'immutable' in six.text_type(exc)
-
-
-class LoadbalancerBuildError(exception.HeatException):
- msg_fmt = _("There was an error building the loadbalancer: %(lb_name)s.")
-
-
-class CloudLoadBalancer(resource.Resource):
- """Represents a Rackspace Cloud Loadbalancer."""
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- PROPERTIES = (
- NAME, NODES, PROTOCOL, ACCESS_LIST, HALF_CLOSED, ALGORITHM,
- CONNECTION_LOGGING, METADATA, PORT, TIMEOUT,
- CONNECTION_THROTTLE, SESSION_PERSISTENCE, VIRTUAL_IPS,
- CONTENT_CACHING, HEALTH_MONITOR, SSL_TERMINATION, ERROR_PAGE,
- HTTPS_REDIRECT,
- ) = (
- 'name', 'nodes', 'protocol', 'accessList', 'halfClosed', 'algorithm',
- 'connectionLogging', 'metadata', 'port', 'timeout',
- 'connectionThrottle', 'sessionPersistence', 'virtualIps',
- 'contentCaching', 'healthMonitor', 'sslTermination', 'errorPage',
- 'httpsRedirect',
- )
-
- LB_UPDATE_PROPS = (NAME, ALGORITHM, PROTOCOL, HALF_CLOSED, PORT, TIMEOUT,
- HTTPS_REDIRECT)
-
- _NODE_KEYS = (
- NODE_ADDRESSES, NODE_PORT, NODE_CONDITION, NODE_TYPE,
- NODE_WEIGHT,
- ) = (
- 'addresses', 'port', 'condition', 'type',
- 'weight',
- )
-
- _ACCESS_LIST_KEYS = (
- ACCESS_LIST_ADDRESS, ACCESS_LIST_TYPE,
- ) = (
- 'address', 'type',
- )
-
- _CONNECTION_THROTTLE_KEYS = (
- CONNECTION_THROTTLE_MAX_CONNECTION_RATE,
- CONNECTION_THROTTLE_MIN_CONNECTIONS,
- CONNECTION_THROTTLE_MAX_CONNECTIONS,
- CONNECTION_THROTTLE_RATE_INTERVAL,
- ) = (
- 'maxConnectionRate',
- 'minConnections',
- 'maxConnections',
- 'rateInterval',
- )
-
- _VIRTUAL_IP_KEYS = (
- VIRTUAL_IP_TYPE, VIRTUAL_IP_IP_VERSION, VIRTUAL_IP_ID
- ) = (
- 'type', 'ipVersion', 'id'
- )
-
- _HEALTH_MONITOR_KEYS = (
- HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION, HEALTH_MONITOR_DELAY,
- HEALTH_MONITOR_TIMEOUT, HEALTH_MONITOR_TYPE, HEALTH_MONITOR_BODY_REGEX,
- HEALTH_MONITOR_HOST_HEADER, HEALTH_MONITOR_PATH,
- HEALTH_MONITOR_STATUS_REGEX,
- ) = (
- 'attemptsBeforeDeactivation', 'delay',
- 'timeout', 'type', 'bodyRegex',
- 'hostHeader', 'path',
- 'statusRegex',
- )
- _HEALTH_MONITOR_CONNECT_KEYS = (
- HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION, HEALTH_MONITOR_DELAY,
- HEALTH_MONITOR_TIMEOUT, HEALTH_MONITOR_TYPE,
- )
-
- _SSL_TERMINATION_KEYS = (
- SSL_TERMINATION_SECURE_PORT, SSL_TERMINATION_PRIVATEKEY,
- SSL_TERMINATION_CERTIFICATE, SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
- SSL_TERMINATION_SECURE_TRAFFIC_ONLY,
- ) = (
- 'securePort', 'privatekey',
- 'certificate', 'intermediateCertificate',
- 'secureTrafficOnly',
- )
-
- ATTRIBUTES = (
- PUBLIC_IP, VIPS
- ) = (
- 'PublicIp', 'virtualIps'
- )
-
- ALGORITHMS = ["LEAST_CONNECTIONS", "RANDOM", "ROUND_ROBIN",
- "WEIGHTED_LEAST_CONNECTIONS", "WEIGHTED_ROUND_ROBIN"]
-
- _health_monitor_schema = {
- HEALTH_MONITOR_ATTEMPTS_BEFORE_DEACTIVATION: properties.Schema(
- properties.Schema.NUMBER,
- required=True,
- constraints=[
- constraints.Range(1, 10),
- ]
- ),
- HEALTH_MONITOR_DELAY: properties.Schema(
- properties.Schema.NUMBER,
- required=True,
- constraints=[
- constraints.Range(1, 3600),
- ]
- ),
- HEALTH_MONITOR_TIMEOUT: properties.Schema(
- properties.Schema.NUMBER,
- required=True,
- constraints=[
- constraints.Range(1, 300),
- ]
- ),
- HEALTH_MONITOR_TYPE: properties.Schema(
- properties.Schema.STRING,
- required=True,
- constraints=[
- constraints.AllowedValues(['CONNECT', 'HTTP', 'HTTPS']),
- ]
- ),
- HEALTH_MONITOR_BODY_REGEX: properties.Schema(
- properties.Schema.STRING
- ),
- HEALTH_MONITOR_HOST_HEADER: properties.Schema(
- properties.Schema.STRING
- ),
- HEALTH_MONITOR_PATH: properties.Schema(
- properties.Schema.STRING
- ),
- HEALTH_MONITOR_STATUS_REGEX: properties.Schema(
- properties.Schema.STRING
- ),
- }
-
- properties_schema = {
- NAME: properties.Schema(
- properties.Schema.STRING,
- update_allowed=True
- ),
- NODES: properties.Schema(
- properties.Schema.LIST,
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- NODE_ADDRESSES: properties.Schema(
- properties.Schema.LIST,
- required=True,
- description=(_("IP addresses for the load balancer "
- "node. Must have at least one "
- "address.")),
- schema=properties.Schema(
- properties.Schema.STRING
- )
- ),
- NODE_PORT: properties.Schema(
- properties.Schema.INTEGER,
- required=True
- ),
- NODE_CONDITION: properties.Schema(
- properties.Schema.STRING,
- default='ENABLED',
- constraints=[
- constraints.AllowedValues(['ENABLED',
- 'DISABLED',
- 'DRAINING']),
- ]
- ),
- NODE_TYPE: properties.Schema(
- properties.Schema.STRING,
- default='PRIMARY',
- constraints=[
- constraints.AllowedValues(['PRIMARY',
- 'SECONDARY']),
- ]
- ),
- NODE_WEIGHT: properties.Schema(
- properties.Schema.NUMBER,
- default=1,
- constraints=[
- constraints.Range(1, 100),
- ]
- ),
- },
- ),
- required=True,
- update_allowed=True
- ),
- PROTOCOL: properties.Schema(
- properties.Schema.STRING,
- required=True,
- constraints=[
- constraints.AllowedValues(['DNS_TCP', 'DNS_UDP', 'FTP',
- 'HTTP', 'HTTPS', 'IMAPS',
- 'IMAPv4', 'LDAP', 'LDAPS',
- 'MYSQL', 'POP3', 'POP3S', 'SMTP',
- 'TCP', 'TCP_CLIENT_FIRST', 'UDP',
- 'UDP_STREAM', 'SFTP']),
- ],
- update_allowed=True
- ),
- ACCESS_LIST: properties.Schema(
- properties.Schema.LIST,
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- ACCESS_LIST_ADDRESS: properties.Schema(
- properties.Schema.STRING,
- required=True
- ),
- ACCESS_LIST_TYPE: properties.Schema(
- properties.Schema.STRING,
- required=True,
- constraints=[
- constraints.AllowedValues(['ALLOW', 'DENY']),
- ]
- ),
- },
- )
- ),
- HALF_CLOSED: properties.Schema(
- properties.Schema.BOOLEAN,
- update_allowed=True
- ),
- ALGORITHM: properties.Schema(
- properties.Schema.STRING,
- constraints=[
- constraints.AllowedValues(ALGORITHMS)
- ],
- update_allowed=True
- ),
- CONNECTION_LOGGING: properties.Schema(
- properties.Schema.BOOLEAN,
- update_allowed=True
- ),
- METADATA: properties.Schema(
- properties.Schema.MAP,
- update_allowed=True
- ),
- PORT: properties.Schema(
- properties.Schema.INTEGER,
- required=True,
- update_allowed=True
- ),
- TIMEOUT: properties.Schema(
- properties.Schema.NUMBER,
- constraints=[
- constraints.Range(1, 120),
- ],
- update_allowed=True
- ),
- CONNECTION_THROTTLE: properties.Schema(
- properties.Schema.MAP,
- schema={
- CONNECTION_THROTTLE_MAX_CONNECTION_RATE: properties.Schema(
- properties.Schema.NUMBER,
- constraints=[
- constraints.Range(0, 100000),
- ]
- ),
- CONNECTION_THROTTLE_MIN_CONNECTIONS: properties.Schema(
- properties.Schema.INTEGER,
- constraints=[
- constraints.Range(1, 1000),
- ]
- ),
- CONNECTION_THROTTLE_MAX_CONNECTIONS: properties.Schema(
- properties.Schema.INTEGER,
- constraints=[
- constraints.Range(1, 100000),
- ]
- ),
- CONNECTION_THROTTLE_RATE_INTERVAL: properties.Schema(
- properties.Schema.NUMBER,
- constraints=[
- constraints.Range(1, 3600),
- ]
- ),
- },
- update_allowed=True
- ),
- SESSION_PERSISTENCE: properties.Schema(
- properties.Schema.STRING,
- constraints=[
- constraints.AllowedValues(['HTTP_COOKIE', 'SOURCE_IP']),
- ],
- update_allowed=True
- ),
- VIRTUAL_IPS: properties.Schema(
- properties.Schema.LIST,
- schema=properties.Schema(
- properties.Schema.MAP,
- schema={
- VIRTUAL_IP_TYPE: properties.Schema(
- properties.Schema.STRING,
- _("The type of VIP (public or internal). Required when "
- "'id' is not specified; cannot be combined with 'id'."),
- constraints=[
- constraints.AllowedValues(['SERVICENET',
- 'PUBLIC']),
- ]
- ),
- VIRTUAL_IP_IP_VERSION: properties.Schema(
- properties.Schema.STRING,
- _("IP version of the VIP. Required when 'id' is not "
- "specified; cannot be combined with 'id'."),
- constraints=[
- constraints.AllowedValues(['IPV6', 'IPV4']),
- ]
- ),
- VIRTUAL_IP_ID: properties.Schema(
- properties.Schema.NUMBER,
- _("ID of a shared VIP to use instead of creating a new "
- "one. Cannot be combined with 'type' or 'ipVersion'.")
- )
- },
- ),
- required=True,
- constraints=[
- constraints.Length(min=1)
- ]
- ),
- CONTENT_CACHING: properties.Schema(
- properties.Schema.STRING,
- constraints=[
- constraints.AllowedValues(['ENABLED', 'DISABLED']),
- ],
- update_allowed=True
- ),
- HEALTH_MONITOR: properties.Schema(
- properties.Schema.MAP,
- schema=_health_monitor_schema,
- update_allowed=True
- ),
- SSL_TERMINATION: properties.Schema(
- properties.Schema.MAP,
- schema={
- SSL_TERMINATION_SECURE_PORT: properties.Schema(
- properties.Schema.INTEGER,
- default=443
- ),
- SSL_TERMINATION_PRIVATEKEY: properties.Schema(
- properties.Schema.STRING,
- required=True
- ),
- SSL_TERMINATION_CERTIFICATE: properties.Schema(
- properties.Schema.STRING,
- required=True
- ),
- # only required if configuring intermediate ssl termination
- # add to custom validation
- SSL_TERMINATION_INTERMEDIATE_CERTIFICATE: properties.Schema(
- properties.Schema.STRING
- ),
- # pyrax will default to false
- SSL_TERMINATION_SECURE_TRAFFIC_ONLY: properties.Schema(
- properties.Schema.BOOLEAN,
- default=False
- ),
- },
- update_allowed=True
- ),
- ERROR_PAGE: properties.Schema(
- properties.Schema.STRING,
- update_allowed=True
- ),
- HTTPS_REDIRECT: properties.Schema(
- properties.Schema.BOOLEAN,
- _("Enables or disables HTTP to HTTPS redirection for the load "
- "balancer. When enabled, any HTTP request returns status code "
- "301 (Moved Permanently), and the requester is redirected to "
- "the requested URL via the HTTPS protocol on port 443. Only "
- "available for HTTPS protocol (port=443), or HTTP protocol with "
- "a properly configured SSL termination (secureTrafficOnly=true, "
- "securePort=443)."),
- update_allowed=True,
- default=False,
- support_status=support.SupportStatus(version="2015.1")
- )
- }
-
- attributes_schema = {
- PUBLIC_IP: attributes.Schema(
- _('Public IP address of the specified instance.')
- ),
- VIPS: attributes.Schema(
- _("A list of assigned virtual IP addresses.")
- )
- }
-
- ACTIVE_STATUS = 'ACTIVE'
- DELETED_STATUS = 'DELETED'
- PENDING_DELETE_STATUS = 'PENDING_DELETE'
- PENDING_UPDATE_STATUS = 'PENDING_UPDATE'
-
- def __init__(self, name, json_snippet, stack):
- super(CloudLoadBalancer, self).__init__(name, json_snippet, stack)
- self.clb = self.cloud_lb()
-
- def cloud_lb(self):
- return self.client('cloud_lb')
-
- def _setup_properties(self, properties, function):
- """Use defined schema properties as kwargs for loadbalancer objects."""
- if properties and function:
- return [function(**self._remove_none(item_dict))
- for item_dict in properties]
- elif function:
- return [function()]
-
- def _alter_properties_for_api(self):
- """Wrap bare property values in the structures the API expects.
-
- Several properties must be passed to the API inside boilerplate
- key/value wrappers. Build those wrappers here so that templates
- can specify the bare values.
- """
- session_persistence = None
- if self.SESSION_PERSISTENCE in self.properties.data:
- session_persistence = {'persistenceType':
- self.properties[self.SESSION_PERSISTENCE]}
- connection_logging = None
- if self.CONNECTION_LOGGING in self.properties.data:
- connection_logging = {"enabled":
- self.properties[self.CONNECTION_LOGGING]}
- metadata = None
- if self.METADATA in self.properties.data:
- metadata = [{'key': k, 'value': v}
- for k, v
- in six.iteritems(self.properties[self.METADATA])]
-
- return (session_persistence, connection_logging, metadata)
-
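Concretely, the wrappers built above take these shapes (illustrative values):

    session_persistence = {'persistenceType': 'HTTP_COOKIE'}
    connection_logging = {'enabled': True}
    metadata = [{'key': 'owner', 'value': 'ops'}]  # from {'owner': 'ops'}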
- def _check_active(self, lb=None):
- """Fetch the loadbalancer if needed and check for ACTIVE status."""
- if not lb:
- lb = self.clb.get(self.resource_id)
- return lb.status == self.ACTIVE_STATUS
-
- def _valid_HTTPS_redirect_with_HTTP_prot(self):
- """Determine if HTTPS redirect is valid when the protocol is HTTP."""
- proto = self.properties[self.PROTOCOL]
- redir = self.properties[self.HTTPS_REDIRECT]
- termcfg = self.properties.get(self.SSL_TERMINATION) or {}
- seconly = termcfg.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY, False)
- secport = termcfg.get(self.SSL_TERMINATION_SECURE_PORT, 0)
- return redir and proto == "HTTP" and seconly and secport == 443
-
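For example, this is the one property combination for which the check above holds (any other protocol/secureTrafficOnly/securePort combination yields False):

    properties = {
        'protocol': 'HTTP',
        'httpsRedirect': True,
        'sslTermination': {'secureTrafficOnly': True, 'securePort': 443},
    }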
- def _process_node(self, node):
- for addr in node.get(self.NODE_ADDRESSES, []):
- norm_node = copy.deepcopy(node)
- norm_node['address'] = addr
- del norm_node[self.NODE_ADDRESSES]
- yield norm_node
-
- def _process_nodes(self, node_list):
- node_itr = six.moves.map(self._process_node, node_list)
- return itertools.chain.from_iterable(node_itr)
-
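For example, a node entry with two addresses fans out into two per-address nodes (any extra keys are carried along by the deepcopy):

    node = {'addresses': ['10.0.0.1', '10.0.0.2'], 'port': 80}
    list(lb._process_nodes([node]))
    # -> [{'address': '10.0.0.1', 'port': 80},
    #     {'address': '10.0.0.2', 'port': 80}]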
- def _validate_https_redirect(self):
- redir = self.properties[self.HTTPS_REDIRECT]
- proto = self.properties[self.PROTOCOL]
-
- if (redir and (proto != "HTTPS") and
- not self._valid_HTTPS_redirect_with_HTTP_prot()):
- message = _("HTTPS redirect is only available for the HTTPS "
- "protocol (port=443), or the HTTP protocol with "
- "a properly configured SSL termination "
- "(secureTrafficOnly=true, securePort=443).")
- raise exception.StackValidationFailed(message=message)
-
- def handle_create(self):
- node_list = self._process_nodes(self.properties.get(self.NODES))
- nodes = [self.clb.Node(**node) for node in node_list]
- vips = self.properties.get(self.VIRTUAL_IPS)
-
- virtual_ips = self._setup_properties(vips, self.clb.VirtualIP)
-
- (session_persistence, connection_logging, metadata
- ) = self._alter_properties_for_api()
-
- lb_body = {
- 'port': self.properties[self.PORT],
- 'protocol': self.properties[self.PROTOCOL],
- 'nodes': nodes,
- 'virtual_ips': virtual_ips,
- 'algorithm': self.properties.get(self.ALGORITHM),
- 'halfClosed': self.properties.get(self.HALF_CLOSED),
- 'connectionThrottle': self.properties.get(
- self.CONNECTION_THROTTLE),
- 'metadata': metadata,
- 'healthMonitor': self.properties.get(self.HEALTH_MONITOR),
- 'sessionPersistence': session_persistence,
- 'timeout': self.properties.get(self.TIMEOUT),
- 'connectionLogging': connection_logging,
- self.HTTPS_REDIRECT: self.properties[self.HTTPS_REDIRECT]
- }
- if self._valid_HTTPS_redirect_with_HTTP_prot():
- lb_body[self.HTTPS_REDIRECT] = False
- self._validate_https_redirect()
-
- lb_name = (self.properties.get(self.NAME) or
- self.physical_resource_name())
- LOG.debug("Creating loadbalancer %s: %s", lb_name, lb_body)
- lb = self.clb.create(lb_name, **lb_body)
- self.resource_id_set(str(lb.id))
-
- def check_create_complete(self, *args):
- lb = self.clb.get(self.resource_id)
- return (self._check_active(lb) and
- self._create_access_list(lb) and
- self._create_errorpage(lb) and
- self._create_ssl_term(lb) and
- self._create_redirect(lb) and
- self._create_cc(lb))
-
- def _create_access_list(self, lb):
- if not self.properties[self.ACCESS_LIST]:
- return True
-
- old_access_list = lb.get_access_list()
- new_access_list = self.properties[self.ACCESS_LIST]
- if not self._access_list_needs_update(old_access_list,
- new_access_list):
- return True
-
- try:
- lb.add_access_list(new_access_list)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- def _create_errorpage(self, lb):
- if not self.properties[self.ERROR_PAGE]:
- return True
-
- old_errorpage = lb.get_error_page()
- new_errorpage_content = self.properties[self.ERROR_PAGE]
- new_errorpage = {'errorpage': {'content': new_errorpage_content}}
- if not self._errorpage_needs_update(old_errorpage, new_errorpage):
- return True
-
- try:
- lb.set_error_page(new_errorpage_content)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- def _create_ssl_term(self, lb):
- if not self.properties[self.SSL_TERMINATION]:
- return True
-
- old_ssl_term = lb.get_ssl_termination()
- new_ssl_term = self.properties[self.SSL_TERMINATION]
- if not self._ssl_term_needs_update(old_ssl_term, new_ssl_term):
- return True
-
- try:
- lb.add_ssl_termination(**new_ssl_term)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- def _create_redirect(self, lb):
- if not self._valid_HTTPS_redirect_with_HTTP_prot():
- return True
-
- old_redirect = lb.httpsRedirect
- new_redirect = self.properties[self.HTTPS_REDIRECT]
- if not self._redirect_needs_update(old_redirect, new_redirect):
- return True
-
- try:
- lb.update(httpsRedirect=True)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- def _create_cc(self, lb):
- if not self.properties[self.CONTENT_CACHING]:
- return True
-
- old_cc = lb.content_caching
- new_cc = self.properties[self.CONTENT_CACHING] == 'ENABLED'
- if not self._cc_needs_update(old_cc, new_cc):
- return True
-
- try:
- lb.content_caching = new_cc
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- def handle_check(self):
- lb = self.clb.get(self.resource_id)
- if not self._check_active():
- raise exception.Error(_("Cloud Loadbalancer is not ACTIVE "
- "(was: %s)") % lb.status)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- return prop_diff
-
- def check_update_complete(self, prop_diff):
- lb = self.clb.get(self.resource_id)
- return (lb.status != self.PENDING_UPDATE_STATUS and # lb immutable?
- self._update_props(lb, prop_diff) and
- self._update_nodes_add(lb, prop_diff) and
- self._update_nodes_delete(lb, prop_diff) and
- self._update_nodes_change(lb, prop_diff) and
- self._update_health_monitor(lb, prop_diff) and
- self._update_session_persistence(lb, prop_diff) and
- self._update_ssl_termination(lb, prop_diff) and
- self._update_metadata(lb, prop_diff) and
- self._update_errorpage(lb, prop_diff) and
- self._update_connection_logging(lb, prop_diff) and
- self._update_connection_throttle(lb, prop_diff) and
- self._update_content_caching(lb, prop_diff))
-
- def _nodes_need_update_add(self, old, new):
- if not old:
- return True
-
- new = list(self._process_nodes(new))
- new_nodes = ["%s%s" % (x['address'], x['port']) for x in new]
- old_nodes = ["%s%s" % (x.address, x.port) for x in old]
- for node in new_nodes:
- if node not in old_nodes:
- return True
-
- return False
-
- def _nodes_need_update_delete(self, old, new):
- if not new:
- return True
-
- new = list(self._process_nodes(new))
- new_nodes = ["%s%s" % (x['address'], x['port']) for x in new]
- old_nodes = ["%s%s" % (x.address, x.port) for x in old]
- for node in old_nodes:
- if node not in new_nodes:
- return True
-
- return False
-
- def _nodes_need_update_change(self, old, new):
- def find_node(nodes, address, port):
- for node in nodes:
- if node['address'] == address and node['port'] == port:
- return node
-
- new = list(self._process_nodes(new))
- for old_node in old:
- new_node = find_node(new, old_node.address, old_node.port)
- if (new_node['condition'] != old_node.condition or
- new_node['type'] != old_node.type or
- new_node['weight'] != old_node.weight):
- return True
-
- return False
-
- def _needs_update_comparison(self, old, new):
- return old != new
-
- def _needs_update_comparison_bool(self, old, new):
- if new is None:
- return old
- return self._needs_update_comparison(old, new)
-
- def _needs_update_comparison_nullable(self, old, new):
- if not old and not new:
- return False
- return self._needs_update_comparison(old, new)
-
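-    # The three comparison helpers above differ only in how they treat
-    # "unset": the plain form compares directly, the bool form treats a
-    # None new value as a request to clear whatever is currently set, and
-    # the nullable form treats two falsy values (e.g. None vs {}) as equal.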
- def _props_need_update(self, old, new):
- return self._needs_update_comparison_nullable(old, new) # dict
-
- def _hm_needs_update(self, old, new):
- return self._needs_update_comparison_nullable(old, new) # dict
-
- def _sp_needs_update(self, old, new):
- return self._needs_update_comparison_bool(old, new) # bool
-
- def _metadata_needs_update(self, old, new):
- return self._needs_update_comparison_nullable(old, new) # dict
-
- def _errorpage_needs_update(self, old, new):
- return self._needs_update_comparison_nullable(old, new) # str
-
- def _cl_needs_update(self, old, new):
- return self._needs_update_comparison_bool(old, new) # bool
-
- def _ct_needs_update(self, old, new):
- return self._needs_update_comparison_nullable(old, new) # dict
-
- def _cc_needs_update(self, old, new):
- return self._needs_update_comparison_bool(old, new) # bool
-
- def _ssl_term_needs_update(self, old, new):
- if new is None:
- return self._needs_update_comparison_nullable(
- old, new) # dict
-
- # check all relevant keys
- if (old.get(self.SSL_TERMINATION_SECURE_PORT) !=
- new[self.SSL_TERMINATION_SECURE_PORT]):
- return True
- if (old.get(self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY) !=
- new[self.SSL_TERMINATION_SECURE_TRAFFIC_ONLY]):
- return True
- if (old.get(self.SSL_TERMINATION_CERTIFICATE, '').strip() !=
- new.get(self.SSL_TERMINATION_CERTIFICATE, '').strip()):
- return True
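-        # The intermediate certificate is compared only when the new
-        # properties actually provide one, so omitting it here does not
-        # clear an existing intermediate certificate.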
- if (new.get(self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE, '')
- and (old.get(self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
- '').strip()
- != new.get(self.SSL_TERMINATION_INTERMEDIATE_CERTIFICATE,
- '').strip())):
- return True
- return False
-
- def _access_list_needs_update(self, old, new):
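-        # Compare access lists as order-insensitive sets of entries, keyed
-        # only on the fields we manage.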
- old = [{key: al[key] for key in self._ACCESS_LIST_KEYS} for al in old]
- old = set([frozenset(s.items()) for s in old])
- new = set([frozenset(s.items()) for s in new])
- return old != new
-
- def _redirect_needs_update(self, old, new):
- return self._needs_update_comparison_bool(old, new) # bool
-
- def _update_props(self, lb, prop_diff):
- old_props = {}
- new_props = {}
-
- for prop in prop_diff:
- if prop in self.LB_UPDATE_PROPS:
- old_props[prop] = getattr(lb, prop)
- new_props[prop] = prop_diff[prop]
-
- if new_props and self._props_need_update(old_props, new_props):
- try:
- lb.update(**new_props)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- return False
-
- return True
-
- def _nodes_update_data(self, lb, prop_diff):
- current_nodes = lb.nodes
- diff_nodes = self._process_nodes(prop_diff[self.NODES])
-        # Nodes can be uniquely identified by address and port. Old is
-        # a dict of all nodes the loadbalancer currently knows about.
- old = dict(("{0.address}{0.port}".format(node), node)
- for node in current_nodes)
- # New is a dict of the nodes the loadbalancer will know
- # about after this update.
- new = dict(("%s%s" % (node["address"],
- node[self.NODE_PORT]), node)
- for node in diff_nodes)
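-        # Keys are the bare concatenation of address and port, e.g.
-        # "%s%s" % ("10.0.0.1", 80) -> "10.0.0.180", matching the
-        # comparisons in the _nodes_need_update_* checks above.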
-
- old_set = set(old)
- new_set = set(new)
-
- deleted = old_set.difference(new_set)
- added = new_set.difference(old_set)
- updated = new_set.intersection(old_set)
-
- return old, new, deleted, added, updated
-
- def _update_nodes_add(self, lb, prop_diff):
- """Add loadbalancers in the new map that are not in the old map."""
- if self.NODES not in prop_diff:
- return True
-
- old_nodes = lb.nodes if hasattr(lb, self.NODES) else None
- new_nodes = prop_diff[self.NODES]
- if not self._nodes_need_update_add(old_nodes, new_nodes):
- return True
-
- old, new, deleted, added, updated = self._nodes_update_data(lb,
- prop_diff)
- new_nodes = [self.clb.Node(**new[lb_node]) for lb_node in added]
- if new_nodes:
- try:
- lb.add_nodes(new_nodes)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_nodes_delete(self, lb, prop_diff):
- """Delete loadbalancers in the old dict that aren't in the new dict."""
- if self.NODES not in prop_diff:
- return True
-
- old_nodes = lb.nodes if hasattr(lb, self.NODES) else None
- new_nodes = prop_diff[self.NODES]
- if not self._nodes_need_update_delete(old_nodes, new_nodes):
- return True
-
- old, new, deleted, added, updated = self._nodes_update_data(lb,
- prop_diff)
- for node in deleted:
- try:
- old[node].delete()
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_nodes_change(self, lb, prop_diff):
- """Update nodes that have been changed."""
- if self.NODES not in prop_diff:
- return True
-
- old_nodes = lb.nodes if hasattr(lb, self.NODES) else None
- new_nodes = prop_diff[self.NODES]
- if not self._nodes_need_update_change(old_nodes, new_nodes):
- return True
-
- old, new, deleted, added, updated = self._nodes_update_data(lb,
- prop_diff)
-
- for node in updated:
- node_changed = False
- for attribute, new_value in new[node].items():
- if new_value and new_value != getattr(old[node], attribute):
- node_changed = True
- setattr(old[node], attribute, new_value)
- if node_changed:
- try:
- old[node].update()
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_health_monitor(self, lb, prop_diff):
- if self.HEALTH_MONITOR not in prop_diff:
- return True
-
- old_hm = lb.get_health_monitor()
- new_hm = prop_diff[self.HEALTH_MONITOR]
- if not self._hm_needs_update(old_hm, new_hm):
- return True
-
- try:
- if new_hm is None:
- lb.delete_health_monitor()
- else:
-                # Adding a health monitor is destructive (it replaces
-                # any existing one), so there's no need to delete, then add
- lb.add_health_monitor(**new_hm)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_session_persistence(self, lb, prop_diff):
- if self.SESSION_PERSISTENCE not in prop_diff:
- return True
-
- old_sp = lb.session_persistence
- new_sp = prop_diff[self.SESSION_PERSISTENCE]
- if not self._sp_needs_update(old_sp, new_sp):
- return True
-
- try:
- if new_sp is None:
- lb.session_persistence = ''
- else:
- # Adding session persistence is destructive
- lb.session_persistence = new_sp
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_ssl_termination(self, lb, prop_diff):
- if self.SSL_TERMINATION not in prop_diff:
- return True
-
- old_ssl_term = lb.get_ssl_termination()
- new_ssl_term = prop_diff[self.SSL_TERMINATION]
- if not self._ssl_term_needs_update(old_ssl_term, new_ssl_term):
- return True
-
- try:
- if new_ssl_term is None:
- lb.delete_ssl_termination()
- else:
- # Adding SSL termination is destructive
- lb.add_ssl_termination(**new_ssl_term)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_metadata(self, lb, prop_diff):
- if self.METADATA not in prop_diff:
- return True
-
- old_metadata = lb.get_metadata()
- new_metadata = prop_diff[self.METADATA]
- if not self._metadata_needs_update(old_metadata, new_metadata):
- return True
-
- try:
- if new_metadata is None:
- lb.delete_metadata()
- else:
- lb.set_metadata(new_metadata)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_errorpage(self, lb, prop_diff):
- if self.ERROR_PAGE not in prop_diff:
- return True
-
- old_errorpage = lb.get_error_page()['errorpage']['content']
- new_errorpage = prop_diff[self.ERROR_PAGE]
- if not self._errorpage_needs_update(old_errorpage, new_errorpage):
- return True
-
- try:
- if new_errorpage is None:
- lb.clear_error_page()
- else:
- lb.set_error_page(new_errorpage)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_connection_logging(self, lb, prop_diff):
- if self.CONNECTION_LOGGING not in prop_diff:
- return True
-
- old_cl = lb.connection_logging
- new_cl = prop_diff[self.CONNECTION_LOGGING]
- if not self._cl_needs_update(old_cl, new_cl):
- return True
-
- try:
- if new_cl:
- lb.connection_logging = True
- else:
- lb.connection_logging = False
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_connection_throttle(self, lb, prop_diff):
- if self.CONNECTION_THROTTLE not in prop_diff:
- return True
-
- old_ct = lb.get_connection_throttle()
- new_ct = prop_diff[self.CONNECTION_THROTTLE]
- if not self._ct_needs_update(old_ct, new_ct):
- return True
-
- try:
- if new_ct is None:
- lb.delete_connection_throttle()
- else:
- lb.add_connection_throttle(**new_ct)
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _update_content_caching(self, lb, prop_diff):
- if self.CONTENT_CACHING not in prop_diff:
- return True
-
- old_cc = lb.content_caching
- new_cc = prop_diff[self.CONTENT_CACHING] == 'ENABLED'
- if not self._cc_needs_update(old_cc, new_cc):
- return True
-
- try:
- lb.content_caching = new_cc
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def check_delete_complete(self, *args):
- if self.resource_id is None:
- return True
-
- try:
- loadbalancer = self.clb.get(self.resource_id)
- except NotFound:
- return True
-
- if loadbalancer.status == self.DELETED_STATUS:
- return True
-
- elif loadbalancer.status == self.PENDING_DELETE_STATUS:
- return False
-
- else:
- try:
- loadbalancer.delete()
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def _remove_none(self, property_dict):
- """Remove None values that would cause schema validation problems.
-
- These are values that may be initialized to None.
- """
- return dict((key, value)
- for (key, value) in six.iteritems(property_dict)
- if value is not None)
-
- def validate(self):
- """Validate any of the provided params."""
- res = super(CloudLoadBalancer, self).validate()
- if res:
- return res
-
- if self.properties.get(self.HALF_CLOSED):
- if not (self.properties[self.PROTOCOL] == 'TCP' or
- self.properties[self.PROTOCOL] == 'TCP_CLIENT_FIRST'):
- message = (_('The %s property is only available for the TCP '
- 'or TCP_CLIENT_FIRST protocols')
- % self.HALF_CLOSED)
- raise exception.StackValidationFailed(message=message)
-
- # health_monitor connect and http types require completely different
-        # schemas
- if self.properties.get(self.HEALTH_MONITOR):
- prop_val = self.properties[self.HEALTH_MONITOR]
- health_monitor = self._remove_none(prop_val)
-
- schema = self._health_monitor_schema
- if health_monitor[self.HEALTH_MONITOR_TYPE] == 'CONNECT':
- schema = dict((k, v) for k, v in schema.items()
- if k in self._HEALTH_MONITOR_CONNECT_KEYS)
- properties.Properties(schema,
- health_monitor,
- function.resolve,
- self.name).validate()
-
- # validate if HTTPS_REDIRECT is true
- self._validate_https_redirect()
-        # if a vip specifies an id, it can't specify version or type;
- # otherwise version and type are required
- for vip in self.properties[self.VIRTUAL_IPS]:
- has_id = vip.get(self.VIRTUAL_IP_ID) is not None
- has_version = vip.get(self.VIRTUAL_IP_IP_VERSION) is not None
- has_type = vip.get(self.VIRTUAL_IP_TYPE) is not None
- if has_id:
- if (has_version or has_type):
- message = _("Cannot specify type or version if VIP id is"
- " specified.")
- raise exception.StackValidationFailed(message=message)
- elif not (has_version and has_type):
- message = _("Must specify VIP type and version if no id "
- "specified.")
- raise exception.StackValidationFailed(message=message)
-
- def _public_ip(self, lb):
- for ip in lb.virtual_ips:
- if ip.type == 'PUBLIC':
- return six.text_type(ip.address)
-
- def _resolve_attribute(self, key):
- if self.resource_id:
- lb = self.clb.get(self.resource_id)
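-            # Despite the name, this mapping holds eagerly computed values
-            # rather than callables; both attributes are evaluated on every
-            # lookup.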
- attribute_function = {
- self.PUBLIC_IP: self._public_ip(lb),
- self.VIPS: [{"id": vip.id,
- "type": vip.type,
- "ip_version": vip.ip_version,
- "address": vip.address}
- for vip in lb.virtual_ips]
- }
- if key not in attribute_function:
- raise exception.InvalidTemplateAttribute(resource=self.name,
- key=key)
- function = attribute_function[key]
- LOG.info('%(name)s.GetAtt(%(key)s) == %(function)s',
- {'name': self.name, 'key': key, 'function': function})
- return function
-
-
-def resource_mapping():
- return {'Rackspace::Cloud::LoadBalancer': CloudLoadBalancer}
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/resources/cloud_server.py b/contrib/rackspace/rackspace/resources/cloud_server.py
deleted file mode 100644
index b76a1bfc2..000000000
--- a/contrib/rackspace/rackspace/resources/cloud_server.py
+++ /dev/null
@@ -1,309 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from oslo_log import log as logging
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine.resources.openstack.nova import server
-from heat.engine import support
-
-
-try:
- import pyrax # noqa
- PYRAX_INSTALLED = True
-except ImportError:
- PYRAX_INSTALLED = False
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudServer(server.Server):
- """Resource for Rackspace Cloud Servers.
-
-    This resource overloads the existing integrated OS::Nova::Server
-    resource and is used for Rackspace Cloud Servers.
- """
-
- support_status = support.SupportStatus(
- status=support.UNSUPPORTED,
- message=_('This resource is not supported, use at your own risk.'))
-
- # Rackspace Cloud automation statuses
- SM_STATUS_IN_PROGRESS = 'In Progress'
- SM_STATUS_COMPLETE = 'Complete'
- SM_STATUS_BUILD_ERROR = 'Build Error'
-
- # RackConnect automation statuses
- RC_STATUS_DEPLOYING = 'DEPLOYING'
- RC_STATUS_DEPLOYED = 'DEPLOYED'
- RC_STATUS_FAILED = 'FAILED'
- RC_STATUS_UNPROCESSABLE = 'UNPROCESSABLE'
-
- # Nova Extra specs
- FLAVOR_EXTRA_SPECS = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
- FLAVOR_CLASSES_KEY = 'flavor_classes'
- FLAVOR_ACCEPT_ANY = '*'
- FLAVOR_CLASS = 'class'
- DISK_IO_INDEX = 'disk_io_index'
- FLAVOR_CLASSES = (
- GENERAL1, MEMORY1, PERFORMANCE2, PERFORMANCE1, STANDARD1, IO1,
- ONMETAL, COMPUTE1
- ) = (
- 'general1', 'memory1', 'performance2', 'performance1',
- 'standard1', 'io1', 'onmetal', 'compute1',
- )
- BASE_IMAGE_REF = 'base_image_ref'
-
- # flavor classes that can be booted ONLY from volume
- BFV_VOLUME_REQUIRED = {MEMORY1, COMPUTE1}
-
- # flavor classes that can NOT be booted from volume
- NON_BFV = {STANDARD1, ONMETAL}
-
- properties_schema = copy.deepcopy(server.Server.properties_schema)
- properties_schema.update(
- {
- server.Server.USER_DATA_FORMAT: properties.Schema(
- properties.Schema.STRING,
- _('How the user_data should be formatted for the server. '
- 'For RAW the user_data is passed to Nova unmodified. '
- 'For SOFTWARE_CONFIG user_data is bundled as part of the '
- 'software config data, and metadata is derived from any '
- 'associated SoftwareDeployment resources.'),
- default=server.Server.RAW,
- constraints=[
- constraints.AllowedValues([
- server.Server.RAW, server.Server.SOFTWARE_CONFIG
- ])
- ]
- ),
- }
- )
- properties_schema.update(
- {
- server.Server.SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
- properties.Schema.STRING,
- _('How the server should receive the metadata required for '
- 'software configuration. POLL_TEMP_URL is the only '
- 'supported transport on Rackspace Cloud. This property is '
- 'retained for compatibility.'),
- default=server.Server.POLL_TEMP_URL,
- update_allowed=True,
- constraints=[
- constraints.AllowedValues([
- server.Server.POLL_TEMP_URL
- ])
- ]
- ),
- }
- )
-
- def __init__(self, name, json_snippet, stack):
- super(CloudServer, self).__init__(name, json_snippet, stack)
- self._managed_cloud_started_event_sent = False
- self._rack_connect_started_event_sent = False
-
- def _config_drive(self):
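-        # Force the config drive on whenever there is user data or
-        # software-config metadata to deliver to the server.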
- user_data_format = self.properties[self.USER_DATA_FORMAT]
- is_sw_config = user_data_format == self.SOFTWARE_CONFIG
- user_data = self.properties.get(self.USER_DATA)
- config_drive = self.properties.get(self.CONFIG_DRIVE)
-        return bool(config_drive or is_sw_config or user_data)
-
- def _check_rax_automation_complete(self, server):
- if not self._managed_cloud_started_event_sent:
- msg = _("Waiting for Rackspace Cloud automation to complete")
- self._add_event(self.action, self.status, msg)
- self._managed_cloud_started_event_sent = True
-
- if 'rax_service_level_automation' not in server.metadata:
- LOG.debug("Cloud server does not have the "
- "rax_service_level_automation metadata tag yet")
- return False
-
- mc_status = server.metadata['rax_service_level_automation']
- LOG.debug("Rackspace Cloud automation status: %s" % mc_status)
-
- if mc_status == self.SM_STATUS_IN_PROGRESS:
- return False
-
- elif mc_status == self.SM_STATUS_COMPLETE:
- msg = _("Rackspace Cloud automation has completed")
- self._add_event(self.action, self.status, msg)
- return True
-
- elif mc_status == self.SM_STATUS_BUILD_ERROR:
- raise exception.Error(_("Rackspace Cloud automation failed"))
-
- else:
- raise exception.Error(_("Unknown Rackspace Cloud automation "
- "status: %s") % mc_status)
-
- def _check_rack_connect_complete(self, server):
- if not self._rack_connect_started_event_sent:
- msg = _("Waiting for RackConnect automation to complete")
- self._add_event(self.action, self.status, msg)
- self._rack_connect_started_event_sent = True
-
- if 'rackconnect_automation_status' not in server.metadata:
- LOG.debug("RackConnect server does not have the "
- "rackconnect_automation_status metadata tag yet")
- return False
-
- rc_status = server.metadata['rackconnect_automation_status']
- LOG.debug("RackConnect automation status: %s" % rc_status)
-
- if rc_status == self.RC_STATUS_DEPLOYING:
- return False
-
- elif rc_status == self.RC_STATUS_DEPLOYED:
- self._server = None # The public IP changed, forget old one
- return True
-
- elif rc_status == self.RC_STATUS_UNPROCESSABLE:
-            # UNPROCESSABLE means the RackConnect automation was not
-            # attempted (e.g. the Cloud Server is in a different DC than
-            # the dedicated gear, so RackConnect does not apply). It is
-            # okay if we do not raise an exception.
- reason = server.metadata.get('rackconnect_unprocessable_reason',
- None)
- if reason is not None:
- LOG.warning("RackConnect unprocessable reason: %s",
- reason)
-
- msg = _("RackConnect automation has completed")
- self._add_event(self.action, self.status, msg)
- return True
-
- elif rc_status == self.RC_STATUS_FAILED:
- raise exception.Error(_("RackConnect automation FAILED"))
-
- else:
- msg = _("Unknown RackConnect automation status: %s") % rc_status
- raise exception.Error(msg)
-
- def check_create_complete(self, server_id):
- """Check if server creation is complete and handle server configs."""
- if not super(CloudServer, self).check_create_complete(server_id):
- return False
-
- server = self.client_plugin().fetch_server(server_id)
- if not server:
- return False
-
- if ('rack_connect' in self.context.roles and not
- self._check_rack_connect_complete(server)):
- return False
-
- if not self._check_rax_automation_complete(server):
- return False
-
- return True
-
-    # Since the Rackspace compute service does not support the 'os-interface'
-    # endpoint, accessing the addresses attribute of OS::Nova::Server results
-    # in a NotFound error. Here we override '_add_port_for_address' and use
-    # the 'os-virtual-interfacesv2' endpoint to get the same information.
- def _add_port_for_address(self, server):
- def get_port(net_name, address):
- for iface in ifaces:
- for ip_addr in iface.ip_addresses:
- if ip_addr['network_label'] == net_name and ip_addr[
- 'address'] == address:
- return iface.id
-
- nets = copy.deepcopy(server.addresses)
- nova_ext = self.client().os_virtual_interfacesv2_python_novaclient_ext
- ifaces = nova_ext.list(server.id)
- for net_name, addresses in nets.items():
- for address in addresses:
- address['port'] = get_port(net_name, address['addr'])
-
- return self._extend_networks(nets)
-
- def _base_image_obj(self, image):
- image_obj = self.client_plugin('glance').get_image(image)
- if self.BASE_IMAGE_REF in image_obj:
- base_image = image_obj[self.BASE_IMAGE_REF]
- return self.client_plugin('glance').get_image(base_image)
- return image_obj
-
- def _image_flavor_class_match(self, flavor_type, image):
- base_image_obj = self._base_image_obj(image)
- flavor_class_string = base_image_obj.get(self.FLAVOR_CLASSES_KEY)
-
- # If the flavor_class_string metadata does not exist or is
- # empty, do not validate image/flavor combo
- if not flavor_class_string:
- return True
-
- flavor_class_excluded = "!{0}".format(flavor_type)
- flavor_classes_accepted = flavor_class_string.split(',')
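-        # e.g. a flavor_classes string of "general1,!io1" accepts general1
-        # explicitly, while "*,!io1" accepts any class except io1.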
-
- if flavor_type in flavor_classes_accepted:
- return True
-
- if (self.FLAVOR_ACCEPT_ANY in flavor_classes_accepted and
- flavor_class_excluded not in flavor_classes_accepted):
- return True
-
- return False
-
- def validate(self):
- """Validate for Rackspace Cloud specific parameters"""
- super(CloudServer, self).validate()
-
- # check if image, flavor combination is valid
- flavor = self.properties[self.FLAVOR]
- flavor_obj = self.client_plugin().get_flavor(flavor)
- fl_xtra_specs = flavor_obj.to_dict().get(self.FLAVOR_EXTRA_SPECS, {})
- flavor_type = fl_xtra_specs.get(self.FLAVOR_CLASS, None)
-
- image = self.properties.get(self.IMAGE)
- if not image:
- if flavor_type in self.NON_BFV:
- msg = _('Flavor %s cannot be booted from volume.') % flavor
- raise exception.StackValidationFailed(message=msg)
- else:
- # we cannot determine details of the attached volume, so this
- # is all the validation possible
- return
-
- if not self._image_flavor_class_match(flavor_type, image):
- msg = _('Flavor %(flavor)s cannot be used with image '
- '%(image)s.') % {'image': image, 'flavor': flavor}
- raise exception.StackValidationFailed(message=msg)
-
- if flavor_type in self.BFV_VOLUME_REQUIRED:
- msg = _('Flavor %(flavor)s must be booted from volume, '
- 'but image %(image)s was also specified.') % {
- 'flavor': flavor, 'image': image}
- raise exception.StackValidationFailed(message=msg)
-
-
-def resource_mapping():
- return {'OS::Nova::Server': CloudServer}
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/resources/cloudnetworks.py b/contrib/rackspace/rackspace/resources/cloudnetworks.py
deleted file mode 100644
index ef7cc60c7..000000000
--- a/contrib/rackspace/rackspace/resources/cloudnetworks.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-import six
-
-from heat.common.i18n import _
-from heat.engine import attributes
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
-from heat.engine import support
-
-try:
- from pyrax.exceptions import NetworkInUse # noqa
- from pyrax.exceptions import NotFound # noqa
- PYRAX_INSTALLED = True
-except ImportError:
- PYRAX_INSTALLED = False
-
- class NotFound(Exception):
- """Dummy pyrax exception - only used for testing."""
-
- class NetworkInUse(Exception):
- """Dummy pyrax exception - only used for testing."""
-
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudNetwork(resource.Resource):
- """A resource for creating Rackspace Cloud Networks.
-
- See http://www.rackspace.com/cloud/networks/ for service
- documentation.
- """
-
- support_status = support.SupportStatus(
- status=support.HIDDEN,
- version='6.0.0',
- previous_status=support.SupportStatus(
- status=support.DEPRECATED,
- message=_('Use OS::Neutron::Net instead.'),
- version='2015.1',
- previous_status=support.SupportStatus(version='2014.1')
- )
- )
-
- PROPERTIES = (
- LABEL, CIDR
- ) = (
- "label", "cidr"
- )
-
- ATTRIBUTES = (
- CIDR_ATTR, LABEL_ATTR,
- ) = (
- 'cidr', 'label',
- )
-
- properties_schema = {
- LABEL: properties.Schema(
- properties.Schema.STRING,
- _("The name of the network."),
- required=True,
- constraints=[
- constraints.Length(min=3, max=64)
- ]
- ),
- CIDR: properties.Schema(
- properties.Schema.STRING,
- _("The IP block from which to allocate the network. For example, "
- "172.16.0.0/24 or 2001:DB8::/64."),
- required=True,
- constraints=[
- constraints.CustomConstraint('net_cidr')
- ]
- )
- }
-
- attributes_schema = {
- CIDR_ATTR: attributes.Schema(
- _("The CIDR for an isolated private network.")
- ),
- LABEL_ATTR: attributes.Schema(
- _("The name of the network.")
- ),
- }
-
- def __init__(self, name, json_snippet, stack):
- resource.Resource.__init__(self, name, json_snippet, stack)
- self._network = None
- self._delete_issued = False
-
- def network(self):
- if self.resource_id and not self._network:
- try:
- self._network = self.cloud_networks().get(self.resource_id)
- except NotFound:
- LOG.warning("Could not find network %s but resource id is"
- " set.", self.resource_id)
- return self._network
-
- def cloud_networks(self):
- return self.client('cloud_networks')
-
- def handle_create(self):
- cnw = self.cloud_networks().create(label=self.properties[self.LABEL],
- cidr=self.properties[self.CIDR])
- self.resource_id_set(cnw.id)
-
- def handle_check(self):
- self.cloud_networks().get(self.resource_id)
-
- def check_delete_complete(self, cookie):
- if not self.resource_id:
- return True
-
- try:
- network = self.cloud_networks().get(self.resource_id)
- except NotFound:
- return True
-
- if not network:
- return True
-
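-        # Keep issuing the delete until Cloud Networks accepts it (it
-        # raises NetworkInUse while servers are still attached), then
-        # poll until the network is gone.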
- if not self._delete_issued:
- try:
- network.delete()
- except NetworkInUse:
- LOG.warning("Network '%s' still in use.", network.id)
- else:
- self._delete_issued = True
- return False
-
- return False
-
- def validate(self):
- super(CloudNetwork, self).validate()
-
- def _resolve_attribute(self, name):
- net = self.network()
- if net:
- return six.text_type(getattr(net, name))
- return ""
-
-
-def resource_mapping():
- return {'Rackspace::Cloud::Network': CloudNetwork}
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/resources/lb_node.py b/contrib/rackspace/rackspace/resources/lb_node.py
deleted file mode 100644
index d25f1febb..000000000
--- a/contrib/rackspace/rackspace/resources/lb_node.py
+++ /dev/null
@@ -1,230 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo_utils import timeutils
-import six
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
-
-try:
- from pyrax.exceptions import NotFound # noqa
- PYRAX_INSTALLED = True
-except ImportError:
-    # Set up a fake exception for testing without pyrax
- class NotFound(Exception):
- pass
- PYRAX_INSTALLED = False
-
-
-def lb_immutable(exc):
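-    # pyrax reports a load balancer in a transient (e.g. PENDING_UPDATE)
-    # state with an error whose message mentions "immutable"; callers
-    # treat that as "retry later" rather than as a failure.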
- return 'immutable' in six.text_type(exc)
-
-
-class LoadbalancerDeleted(exception.HeatException):
- msg_fmt = _("The Load Balancer (ID %(lb_id)s) has been deleted.")
-
-
-class NodeNotFound(exception.HeatException):
- msg_fmt = _("Node (ID %(node_id)s) not found on Load Balancer "
- "(ID %(lb_id)s).")
-
-
-class LBNode(resource.Resource):
- """Represents a single node of a Rackspace Cloud Load Balancer"""
-
- default_client_name = 'cloud_lb'
-
- _CONDITIONS = (
- ENABLED, DISABLED, DRAINING,
- ) = (
- 'ENABLED', 'DISABLED', 'DRAINING',
- )
-
- _NODE_KEYS = (
- ADDRESS, PORT, CONDITION, TYPE, WEIGHT
- ) = (
- 'address', 'port', 'condition', 'type', 'weight'
- )
-
- _OTHER_KEYS = (
- LOAD_BALANCER, DRAINING_TIMEOUT
- ) = (
- 'load_balancer', 'draining_timeout'
- )
-
- PROPERTIES = _NODE_KEYS + _OTHER_KEYS
-
- properties_schema = {
- LOAD_BALANCER: properties.Schema(
- properties.Schema.STRING,
- _("The ID of the load balancer to associate the node with."),
- required=True
- ),
- DRAINING_TIMEOUT: properties.Schema(
- properties.Schema.INTEGER,
- _("The time to wait, in seconds, for the node to drain before it "
- "is deleted."),
- default=0,
- constraints=[
- constraints.Range(min=0)
- ],
- update_allowed=True
- ),
- ADDRESS: properties.Schema(
- properties.Schema.STRING,
- _("IP address for the node."),
- required=True
- ),
- PORT: properties.Schema(
- properties.Schema.INTEGER,
- required=True
- ),
- CONDITION: properties.Schema(
- properties.Schema.STRING,
- default=ENABLED,
- constraints=[
- constraints.AllowedValues(_CONDITIONS),
- ],
- update_allowed=True
- ),
- TYPE: properties.Schema(
- properties.Schema.STRING,
- constraints=[
- constraints.AllowedValues(['PRIMARY',
- 'SECONDARY']),
- ],
- update_allowed=True
- ),
- WEIGHT: properties.Schema(
- properties.Schema.NUMBER,
- constraints=[
- constraints.Range(1, 100),
- ],
- update_allowed=True
- ),
- }
-
- def lb(self):
- lb_id = self.properties.get(self.LOAD_BALANCER)
- lb = self.client().get(lb_id)
-
- if lb.status in ('DELETED', 'PENDING_DELETE'):
- raise LoadbalancerDeleted(lb_id=lb.id)
-
- return lb
-
- def node(self, lb):
- for node in getattr(lb, 'nodes', []):
- if node.id == self.resource_id:
- return node
- raise NodeNotFound(node_id=self.resource_id, lb_id=lb.id)
-
- def handle_create(self):
- pass
-
- def check_create_complete(self, *args):
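-        # The node is added here rather than in handle_create so the call
-        # can simply be retried while the load balancer is immutable.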
- node_args = {k: self.properties.get(k) for k in self._NODE_KEYS}
- node = self.client().Node(**node_args)
-
- try:
- resp, body = self.lb().add_nodes([node])
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- new_node = body['nodes'][0]
- node_id = new_node['id']
-
- self.resource_id_set(node_id)
- return True
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- return prop_diff
-
- def check_update_complete(self, prop_diff):
- node = self.node(self.lb())
- is_complete = True
-
- for key in self._NODE_KEYS:
- if key in prop_diff and getattr(node, key, None) != prop_diff[key]:
- setattr(node, key, prop_diff[key])
- is_complete = False
-
- if is_complete:
- return True
-
- try:
- node.update()
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
- def handle_delete(self):
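-        # Return the deletion start time; check_delete_complete uses it
-        # as its cookie to measure the draining timeout.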
- return timeutils.utcnow()
-
- def check_delete_complete(self, deleted_at):
- if self.resource_id is None:
- return True
-
- try:
- node = self.node(self.lb())
- except (NotFound, LoadbalancerDeleted, NodeNotFound):
- return True
-
- if isinstance(deleted_at, six.string_types):
- deleted_at = timeutils.parse_isotime(deleted_at)
-
- deleted_at = timeutils.normalize_time(deleted_at)
- waited = timeutils.utcnow() - deleted_at
- timeout_secs = self.properties[self.DRAINING_TIMEOUT]
- timeout_secs = datetime.timedelta(seconds=timeout_secs)
-
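-        # Once the draining timeout has elapsed, delete the node outright;
-        # until then keep it in DRAINING so it stops taking new connections.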
- if waited > timeout_secs:
- try:
- node.delete()
- except NotFound:
- return True
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
- elif node.condition != self.DRAINING:
- node.condition = self.DRAINING
- try:
- node.update()
- except Exception as exc:
- if lb_immutable(exc):
- return False
- raise
-
- return False
-
-
-def resource_mapping():
- return {'Rackspace::Cloud::LBNode': LBNode}
-
-
-def available_resource_mapping():
- if PYRAX_INSTALLED:
- return resource_mapping()
- return {}
diff --git a/contrib/rackspace/rackspace/tests/__init__.py b/contrib/rackspace/rackspace/tests/__init__.py
deleted file mode 100644
index b1967c8f3..000000000
--- a/contrib/rackspace/rackspace/tests/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import sys
-
-from mox3 import mox
-
-sys.modules['mox'] = mox
diff --git a/contrib/rackspace/rackspace/tests/test_auto_scale.py b/contrib/rackspace/rackspace/tests/test_auto_scale.py
deleted file mode 100644
index 44ce5d802..000000000
--- a/contrib/rackspace/rackspace/tests/test_auto_scale.py
+++ /dev/null
@@ -1,1219 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import itertools
-
-import mock
-import six
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine.clients.os import glance
-from heat.engine.clients.os import nova
-from heat.engine import resource
-from heat.engine import rsrc_defn
-from heat.engine import scheduler
-from heat.tests import common
-from heat.tests import utils
-
-from ..resources import auto_scale # noqa
-
-
-class FakeScalingGroup(object):
- """A fake implementation of pyrax's ScalingGroup object."""
- def __init__(self, id, **kwargs):
- self.id = id
- self.kwargs = kwargs
-
-
-class FakeScalePolicy(object):
- """A fake implementation of pyrax's AutoScalePolicy object."""
- def __init__(self, id, **kwargs):
- self.id = id
- self.kwargs = kwargs
-
-
-class FakeWebHook(object):
- """A fake implementation of pyrax's AutoScaleWebhook object."""
- def __init__(self, id, **kwargs):
- self.id = id
- self.kwargs = kwargs
- self.links = [
- {'rel': 'self', 'href': 'self-url'},
- {'rel': 'capability', 'href': 'capability-url'}]
-
-
-class FakeAutoScale(object):
- """A fake implementation of pyrax's autoscale client."""
-
- def __init__(self):
- self.groups = {}
- self.policies = {}
- self.webhooks = {}
- self.group_counter = itertools.count()
- self.policy_counter = itertools.count()
- self.webhook_counter = itertools.count()
-
- def create(self, **kwargs):
- """Create a scaling group."""
- new_id = str(next(self.group_counter))
- fsg = FakeScalingGroup(new_id, **kwargs)
- self.groups[new_id] = fsg
- return fsg
-
- def _check_args(self, kwargs, allowed):
- for parameter in kwargs:
- if parameter not in allowed:
- raise TypeError("unexpected argument %r" % (parameter,))
-
- def _get_group(self, id):
- if id not in self.groups:
- raise auto_scale.NotFound("Group %s not found!" % (id,))
- return self.groups[id]
-
- def _get_policy(self, id):
- if id not in self.policies:
- raise auto_scale.NotFound("Policy %s not found!" % (id,))
- return self.policies[id]
-
- def _get_webhook(self, webhook_id):
- if webhook_id not in self.webhooks:
- raise auto_scale.NotFound(
- "Webhook %s doesn't exist!" % (webhook_id,))
- return self.webhooks[webhook_id]
-
- def replace(self, group_id, **kwargs):
- """Update the groupConfiguration section of a scaling group."""
- allowed = ['name', 'cooldown',
- 'min_entities', 'max_entities', 'metadata']
- self._check_args(kwargs, allowed)
- self._get_group(group_id).kwargs = kwargs
-
- def replace_launch_config(self, group_id, **kwargs):
- """Update the launch configuration on a scaling group."""
- if kwargs.get('launch_config_type') == 'launch_server':
- allowed = ['launch_config_type', 'server_name', 'image', 'flavor',
- 'disk_config', 'metadata', 'personality', 'networks',
- 'load_balancers', 'key_name', 'user_data',
- 'config_drive']
- elif kwargs.get('launch_config_type') == 'launch_stack':
- allowed = ['launch_config_type', 'template', 'template_url',
- 'disable_rollback', 'environment', 'files',
- 'parameters', 'timeout_mins']
- self._check_args(kwargs, allowed)
- self._get_group(group_id).kwargs = kwargs
-
- def delete(self, group_id):
- """Delete the group, if the min entities and max entities are 0."""
- group = self._get_group(group_id)
- if (group.kwargs['min_entities'] > 0
- or group.kwargs['max_entities'] > 0):
- raise Exception("Can't delete yet!")
- del self.groups[group_id]
-
- def add_policy(self, **kwargs):
- """Create and store a FakeScalePolicy."""
- allowed = [
- 'scaling_group', 'name', 'policy_type', 'cooldown', 'change',
- 'is_percent', 'desired_capacity', 'args']
- self._check_args(kwargs, allowed)
- policy_id = str(next(self.policy_counter))
- policy = FakeScalePolicy(policy_id, **kwargs)
- self.policies[policy_id] = policy
- return policy
-
- def replace_policy(self, scaling_group, policy, **kwargs):
- allowed = [
- 'name', 'policy_type', 'cooldown',
- 'change', 'is_percent', 'desired_capacity', 'args']
- self._check_args(kwargs, allowed)
- policy = self._get_policy(policy)
- assert policy.kwargs['scaling_group'] == scaling_group
- kwargs['scaling_group'] = scaling_group
- policy.kwargs = kwargs
-
- def add_webhook(self, **kwargs):
- """Create and store a FakeWebHook."""
- allowed = ['scaling_group', 'policy', 'name', 'metadata']
- self._check_args(kwargs, allowed)
- webhook_id = str(next(self.webhook_counter))
- webhook = FakeWebHook(webhook_id, **kwargs)
- self.webhooks[webhook_id] = webhook
- return webhook
-
- def delete_policy(self, scaling_group, policy):
- """Delete a policy, if it exists."""
- if policy not in self.policies:
- raise auto_scale.NotFound("Policy %s doesn't exist!" % (policy,))
- assert self.policies[policy].kwargs['scaling_group'] == scaling_group
- del self.policies[policy]
-
- def delete_webhook(self, scaling_group, policy, webhook_id):
- """Delete a webhook, if it exists."""
- webhook = self._get_webhook(webhook_id)
- assert webhook.kwargs['scaling_group'] == scaling_group
- assert webhook.kwargs['policy'] == policy
- del self.webhooks[webhook_id]
-
- def replace_webhook(self, scaling_group, policy, webhook,
- name=None, metadata=None):
- webhook = self._get_webhook(webhook)
- assert webhook.kwargs['scaling_group'] == scaling_group
- assert webhook.kwargs['policy'] == policy
- webhook.kwargs['name'] = name
- webhook.kwargs['metadata'] = metadata
-
-
-class ScalingGroupTest(common.HeatTestCase):
-
- server_template = template_format.parse('''
- HeatTemplateFormatVersion: "2012-12-12"
- Description: "Rackspace Auto Scale"
- Parameters: {}
- Resources:
- my_group:
- Type: Rackspace::AutoScale::Group
- Properties:
- groupConfiguration:
- name: "My Group"
- cooldown: 60
- minEntities: 1
- maxEntities: 25
- metadata:
- group: metadata
- launchConfiguration:
- type: "launch_server"
- args:
- server:
- name: autoscaled-server
- flavorRef: flavor-ref
- imageRef: image-ref
- key_name: my-key
- metadata:
- server: metadata
- personality:
- /tmp/testfile: "dGVzdCBjb250ZW50"
- networks:
- - uuid: "00000000-0000-0000-0000-000000000000"
- - uuid: "11111111-1111-1111-1111-111111111111"
- loadBalancers:
- - loadBalancerId: 234
- port: 80
-
- ''')
-
- stack_template = template_format.parse('''
- HeatTemplateFormatVersion: "2012-12-12"
- Description: "Rackspace Auto Scale"
- Parameters: {}
- Resources:
- my_group:
- Type: Rackspace::AutoScale::Group
- Properties:
- groupConfiguration:
- name: "My Group"
- cooldown: 60
- minEntities: 1
- maxEntities: 25
- metadata:
- group: metadata
- launchConfiguration:
- type: launch_stack
- args:
- stack:
- template: |
- heat_template_version: 2015-10-15
- description: This is a Heat template
- parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
- resources:
- rand:
- type: OS::Heat::RandomString
- disable_rollback: False
- environment:
- parameters:
- image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
- resource_registry:
- Heat::InstallConfigAgent:
- https://myhost.com/bootconfig.yaml
- files:
- fileA.yaml: Contents of the file
- file:///usr/fileB.template: Contents of the file
- parameters:
- flavor: 4 GB Performance
- timeout_mins: 30
- ''')
-
- def setUp(self):
- super(ScalingGroupTest, self).setUp()
- for res_name, res_class in auto_scale.resource_mapping().items():
- resource._register_class(res_name, res_class)
- self.fake_auto_scale = FakeAutoScale()
- self.patchobject(auto_scale.Group, 'auto_scale',
- return_value=self.fake_auto_scale)
- # mock nova and glance client methods to satisfy constraints
- mock_im = self.patchobject(glance.GlanceClientPlugin,
- 'find_image_by_name_or_id')
- mock_im.return_value = 'image-ref'
- mock_fl = self.patchobject(nova.NovaClientPlugin,
- 'find_flavor_by_name_or_id')
- mock_fl.return_value = 'flavor-ref'
-
- def _setup_test_stack(self, template=None):
- if template is None:
- template = self.server_template
- self.stack = utils.parse_stack(template)
- self.stack.create()
- self.assertEqual(
- ('CREATE', 'COMPLETE'), self.stack.state,
- self.stack.status_reason)
-
- def test_group_create_server(self):
- """Creating a group passes all the correct arguments to pyrax.
-
- Also saves the group ID as the resource ID.
- """
- self._setup_test_stack()
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- {
- 'cooldown': 60,
- 'config_drive': False,
- 'user_data': None,
- 'disk_config': None,
- 'flavor': 'flavor-ref',
- 'image': 'image-ref',
- 'load_balancers': [{
- 'loadBalancerId': 234,
- 'port': 80,
- }],
- 'key_name': "my-key",
- 'launch_config_type': u'launch_server',
- 'max_entities': 25,
- 'group_metadata': {'group': 'metadata'},
- 'metadata': {'server': 'metadata'},
- 'min_entities': 1,
- 'name': 'My Group',
- 'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
- {'uuid': '11111111-1111-1111-1111-111111111111'}],
- 'personality': [{
- 'path': u'/tmp/testfile',
- 'contents': u'dGVzdCBjb250ZW50'}],
- 'server_name': u'autoscaled-server'},
- self.fake_auto_scale.groups['0'].kwargs)
-
- resource = self.stack['my_group']
- self.assertEqual('0', resource.FnGetRefId())
-
- def test_group_create_stack(self):
- """Creating a group passes all the correct arguments to pyrax.
-
- Also saves the group ID as the resource ID.
- """
- self._setup_test_stack(self.stack_template)
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- {
- 'cooldown': 60,
- 'min_entities': 1,
- 'max_entities': 25,
- 'group_metadata': {'group': 'metadata'},
- 'name': 'My Group',
- 'launch_config_type': u'launch_stack',
- 'template': (
- '''heat_template_version: 2015-10-15
-description: This is a Heat template
-parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
-resources:
- rand:
- type: OS::Heat::RandomString
-'''),
- 'template_url': None,
- 'disable_rollback': False,
- 'environment': {
- 'parameters': {
- 'image':
- 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
- },
- 'resource_registry': {
- 'Heat::InstallConfigAgent': ('https://myhost.com/'
- 'bootconfig.yaml')
- }
- },
- 'files': {
- 'fileA.yaml': 'Contents of the file',
- 'file:///usr/fileB.template': 'Contents of the file'
- },
- 'parameters': {
- 'flavor': '4 GB Performance',
- },
- 'timeout_mins': 30,
- },
- self.fake_auto_scale.groups['0'].kwargs
- )
-
- resource = self.stack['my_group']
- self.assertEqual('0', resource.FnGetRefId())
-
- def test_group_create_no_personality(self):
-
- template = template_format.parse('''
-HeatTemplateFormatVersion: "2012-12-12"
-Description: "Rackspace Auto Scale"
-Parameters: {}
-Resources:
- my_group:
- Type: Rackspace::AutoScale::Group
- Properties:
- groupConfiguration:
- name: "My Group"
- cooldown: 60
- minEntities: 1
- maxEntities: 25
- metadata:
- group: metadata
- launchConfiguration:
- type: "launch_server"
- args:
- server:
- name: autoscaled-server
- flavorRef: flavor-ref
- imageRef: image-ref
- key_name: my-key
- metadata:
- server: metadata
- networks:
- - uuid: "00000000-0000-0000-0000-000000000000"
- - uuid: "11111111-1111-1111-1111-111111111111"
-''')
-
- self.stack = utils.parse_stack(template)
- self.stack.create()
- self.assertEqual(
- ('CREATE', 'COMPLETE'), self.stack.state,
- self.stack.status_reason)
-
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- {
- 'cooldown': 60,
- 'config_drive': False,
- 'user_data': None,
- 'disk_config': None,
- 'flavor': 'flavor-ref',
- 'image': 'image-ref',
- 'launch_config_type': 'launch_server',
- 'load_balancers': [],
- 'key_name': "my-key",
- 'max_entities': 25,
- 'group_metadata': {'group': 'metadata'},
- 'metadata': {'server': 'metadata'},
- 'min_entities': 1,
- 'name': 'My Group',
- 'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
- {'uuid': '11111111-1111-1111-1111-111111111111'}],
- 'personality': None,
- 'server_name': u'autoscaled-server'},
- self.fake_auto_scale.groups['0'].kwargs)
-
- resource = self.stack['my_group']
- self.assertEqual('0', resource.FnGetRefId())
-
- def test_check(self):
- self._setup_test_stack()
- resource = self.stack['my_group']
- mock_get = mock.Mock()
- resource.auto_scale().get = mock_get
- scheduler.TaskRunner(resource.check)()
- self.assertEqual('CHECK', resource.action)
- self.assertEqual('COMPLETE', resource.status)
-
- mock_get.side_effect = auto_scale.NotFound('boom')
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(resource.check))
- self.assertEqual('CHECK', resource.action)
- self.assertEqual('FAILED', resource.status)
- self.assertIn('boom', str(exc))
-
- def test_update_group_config(self):
- """Updates the groupConfiguration section.
-
-        Updating the groupConfiguration section in a template results in a
- pyrax call to update the group configuration.
- """
- self._setup_test_stack()
-
- resource = self.stack['my_group']
- uprops = copy.deepcopy(dict(resource.properties.data))
- uprops['groupConfiguration']['minEntities'] = 5
- new_template = rsrc_defn.ResourceDefinition(resource.name,
- resource.type(),
- uprops)
- scheduler.TaskRunner(resource.update, new_template)()
-
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- 5, self.fake_auto_scale.groups['0'].kwargs['min_entities'])
-
- def test_update_launch_config_server(self):
- """Updates the launchConfigresults section.
-
- Updates the launchConfigresults section in a template results in a
- pyrax call to update the launch configuration.
- """
- self._setup_test_stack()
-
- resource = self.stack['my_group']
- uprops = copy.deepcopy(dict(resource.properties.data))
- lcargs = uprops['launchConfiguration']['args']
- lcargs['loadBalancers'] = [{'loadBalancerId': '1', 'port': 80}]
- new_template = rsrc_defn.ResourceDefinition(resource.name,
- resource.type(),
- uprops)
-
- scheduler.TaskRunner(resource.update, new_template)()
-
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- [{'loadBalancerId': 1, 'port': 80}],
- self.fake_auto_scale.groups['0'].kwargs['load_balancers'])
-
- def test_update_launch_config_stack(self):
- self._setup_test_stack(self.stack_template)
-
- resource = self.stack['my_group']
- uprops = copy.deepcopy(dict(resource.properties.data))
- lcargs = uprops['launchConfiguration']['args']
- lcargs['stack']['timeout_mins'] = 60
- new_template = rsrc_defn.ResourceDefinition(resource.name,
- resource.type(),
- uprops)
-
- scheduler.TaskRunner(resource.update, new_template)()
-
- self.assertEqual(1, len(self.fake_auto_scale.groups))
- self.assertEqual(
- 60,
- self.fake_auto_scale.groups['0'].kwargs['timeout_mins'])
-
- def test_delete(self):
- """Deleting a ScalingGroup resource invokes pyrax API to delete it."""
- self._setup_test_stack()
- resource = self.stack['my_group']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.groups)
-
- def test_delete_without_backing_group(self):
- """Resource deletion succeeds, if no backing scaling group exists."""
- self._setup_test_stack()
- resource = self.stack['my_group']
- del self.fake_auto_scale.groups['0']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.groups)
-
- def test_delete_waits_for_server_deletion(self):
- """Test case for waiting for successful resource deletion.
-
- The delete operation may fail until the servers are really gone; the
- resource retries until success.
- """
- self._setup_test_stack()
- delete_counter = itertools.count()
-
- def delete(group_id):
- count = next(delete_counter)
- if count < 3:
- raise auto_scale.Forbidden("Not empty!")
-
- self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
- resource = self.stack['my_group']
- scheduler.TaskRunner(resource.delete)()
- # It really called delete until it succeeded:
- self.assertEqual(4, next(delete_counter))
-
- def test_delete_blows_up_on_other_errors(self):
- """Test case for correct error handling during deletion.
-
- Only the Forbidden (403) error is honored as an indicator of pending
- deletion; other errors cause deletion to fail.
- """
- self._setup_test_stack()
-
- def delete(group_id):
- 1 / 0
-
- self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
- resource = self.stack['my_group']
- err = self.assertRaises(
- exception.ResourceFailure, scheduler.TaskRunner(resource.delete))
- self.assertIsInstance(err.exc, ZeroDivisionError)
-
-
-class PolicyTest(common.HeatTestCase):
- policy_template = template_format.parse('''
- HeatTemplateFormatVersion: "2012-12-12"
- Description: "Rackspace Auto Scale"
- Parameters: {}
- Resources:
- my_policy:
- Type: Rackspace::AutoScale::ScalingPolicy
- Properties:
- group: "my-group-id"
- name: "+10 on webhook"
- change: 10
- cooldown: 0
- type: "webhook"
- ''')
-
- def setUp(self):
- super(PolicyTest, self).setUp()
- for res_name, res_class in auto_scale.resource_mapping().items():
- resource._register_class(res_name, res_class)
- self.fake_auto_scale = FakeAutoScale()
- self.patchobject(auto_scale.ScalingPolicy, 'auto_scale',
- return_value=self.fake_auto_scale)
-
- def _setup_test_stack(self, template):
- self.stack = utils.parse_stack(template)
- self.stack.create()
- self.assertEqual(
- ('CREATE', 'COMPLETE'), self.stack.state,
- self.stack.status_reason)
-
- def test_create_webhook_change(self):
- """Creating the resource creates the scaling policy with pyrax.
-
- Also sets the resource's ID to {group_id}:{policy_id}.
- """
- self._setup_test_stack(self.policy_template)
- resource = self.stack['my_policy']
- self.assertEqual('my-group-id:0', resource.FnGetRefId())
- self.assertEqual(
- {
- 'name': '+10 on webhook',
- 'scaling_group': 'my-group-id',
- 'change': 10,
- 'cooldown': 0,
- 'policy_type': 'webhook'},
- self.fake_auto_scale.policies['0'].kwargs)
-
- def test_webhook_change_percent(self):
- """Test case for specified changePercent.
-
- When changePercent is specified, it translates to pyrax arguments
- 'change' and 'is_percent'.
- """
- template = copy.deepcopy(self.policy_template)
- template['Resources']['my_policy']['Properties']['changePercent'] = 10
- del template['Resources']['my_policy']['Properties']['change']
- self._setup_test_stack(template)
- self.assertEqual(
- {
- 'name': '+10 on webhook',
- 'scaling_group': 'my-group-id',
- 'change': 10,
- 'is_percent': True,
- 'cooldown': 0,
- 'policy_type': 'webhook'},
- self.fake_auto_scale.policies['0'].kwargs)
-
- def test_webhook_desired_capacity(self):
- """Test case for desiredCapacity property.
-
- The desiredCapacity property translates to the desired_capacity pyrax
- argument.
- """
- template = copy.deepcopy(self.policy_template)
- template['Resources']['my_policy']['Properties']['desiredCapacity'] = 1
- del template['Resources']['my_policy']['Properties']['change']
- self._setup_test_stack(template)
- self.assertEqual(
- {
- 'name': '+10 on webhook',
- 'scaling_group': 'my-group-id',
- 'desired_capacity': 1,
- 'cooldown': 0,
- 'policy_type': 'webhook'},
- self.fake_auto_scale.policies['0'].kwargs)
-
- def test_schedule(self):
- """We can specify schedule-type policies with args."""
- template = copy.deepcopy(self.policy_template)
- props = template['Resources']['my_policy']['Properties']
- props['type'] = 'schedule'
- props['args'] = {'cron': '0 0 0 * *'}
- self._setup_test_stack(template)
- self.assertEqual(
- {
- 'name': '+10 on webhook',
- 'scaling_group': 'my-group-id',
- 'change': 10,
- 'cooldown': 0,
- 'policy_type': 'schedule',
- 'args': {'cron': '0 0 0 * *'}},
- self.fake_auto_scale.policies['0'].kwargs)
-
- def test_update(self):
- """Updating the resource calls appropriate update method with pyrax."""
- self._setup_test_stack(self.policy_template)
- resource = self.stack['my_policy']
- uprops = copy.deepcopy(dict(resource.properties.data))
- uprops['changePercent'] = 50
- del uprops['change']
- template = rsrc_defn.ResourceDefinition(resource.name,
- resource.type(),
- uprops)
-
- scheduler.TaskRunner(resource.update, template)()
- self.assertEqual(
- {
- 'name': '+10 on webhook',
- 'scaling_group': 'my-group-id',
- 'change': 50,
- 'is_percent': True,
- 'cooldown': 0,
- 'policy_type': 'webhook'},
- self.fake_auto_scale.policies['0'].kwargs)
-
- def test_delete(self):
- """Deleting the resource deletes the policy with pyrax."""
- self._setup_test_stack(self.policy_template)
- resource = self.stack['my_policy']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.policies)
-
- def test_delete_policy_non_existent(self):
- """Test case for deleting resource without backing policy.
-
- Deleting a resource for which there is no backing policy succeeds
- silently.
- """
- self._setup_test_stack(self.policy_template)
- resource = self.stack['my_policy']
- del self.fake_auto_scale.policies['0']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.policies)
-
-
-class WebHookTest(common.HeatTestCase):
- webhook_template = template_format.parse('''
- HeatTemplateFormatVersion: "2012-12-12"
- Description: "Rackspace Auto Scale"
- Parameters: {}
- Resources:
- my_webhook:
- Type: Rackspace::AutoScale::WebHook
- Properties:
- policy: my-group-id:my-policy-id
- name: "exec my policy"
- metadata:
- a: b
- ''')
-
- def setUp(self):
- super(WebHookTest, self).setUp()
- for res_name, res_class in auto_scale.resource_mapping().items():
- resource._register_class(res_name, res_class)
- self.fake_auto_scale = FakeAutoScale()
- self.patchobject(auto_scale.WebHook, 'auto_scale',
- return_value=self.fake_auto_scale)
-
- def _setup_test_stack(self, template):
- self.stack = utils.parse_stack(template)
- self.stack.create()
- self.assertEqual(
- ('CREATE', 'COMPLETE'), self.stack.state,
- self.stack.status_reason)
-
- def test_create(self):
- """Creates a webhook with pyrax and makes attributes available."""
- self._setup_test_stack(self.webhook_template)
- resource = self.stack['my_webhook']
- self.assertEqual(
- {
- 'name': 'exec my policy',
- 'scaling_group': 'my-group-id',
- 'policy': 'my-policy-id',
- 'metadata': {'a': 'b'}},
- self.fake_auto_scale.webhooks['0'].kwargs)
- self.assertEqual("self-url", resource.FnGetAtt("executeUrl"))
- self.assertEqual("capability-url", resource.FnGetAtt("capabilityUrl"))
-
- def test_failed_create(self):
- """When a create fails, getting the attributes returns None."""
- template = copy.deepcopy(self.webhook_template)
- template['Resources']['my_webhook']['Properties']['policy'] = 'foobar'
- self.stack = utils.parse_stack(template)
- self.stack.create()
- resource = self.stack['my_webhook']
- self.assertIsNone(resource.FnGetAtt('capabilityUrl'))
-
- def test_update(self):
- self._setup_test_stack(self.webhook_template)
- resource = self.stack['my_webhook']
- uprops = copy.deepcopy(dict(resource.properties.data))
- uprops['metadata']['a'] = 'different!'
- uprops['name'] = 'newhook'
- template = rsrc_defn.ResourceDefinition(resource.name,
- resource.type(),
- uprops)
-
- scheduler.TaskRunner(resource.update, template)()
- self.assertEqual(
- {
- 'name': 'newhook',
- 'scaling_group': 'my-group-id',
- 'policy': 'my-policy-id',
- 'metadata': {'a': 'different!'}},
- self.fake_auto_scale.webhooks['0'].kwargs)
-
- def test_delete(self):
- """Deleting the resource deletes the webhook with pyrax."""
- self._setup_test_stack(self.webhook_template)
- resource = self.stack['my_webhook']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.webhooks)
-
- def test_delete_without_backing_webhook(self):
- """Test case for deleting resource without backing webhook.
-
- Deleting a resource for which there is no backing webhook succeeds
- silently.
- """
- self._setup_test_stack(self.webhook_template)
- resource = self.stack['my_webhook']
- del self.fake_auto_scale.webhooks['0']
- scheduler.TaskRunner(resource.delete)()
- self.assertEqual({}, self.fake_auto_scale.webhooks)
-
-
-@mock.patch.object(resource.Resource, "client_plugin")
-@mock.patch.object(resource.Resource, "client")
-class AutoScaleGroupValidationTests(common.HeatTestCase):
- def setUp(self):
- super(AutoScaleGroupValidationTests, self).setUp()
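-        # validate() needs no real stack; a bare Mock with the cache and
-        # DB lookups stubbed out is sufficient.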
- self.mockstack = mock.Mock()
- self.mockstack.has_cache_data.return_value = False
- self.mockstack.db_resource_get.return_value = None
-
- def test_validate_no_rcv3_pool(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "loadBalancers": [{
- "loadBalancerId": 'not integer!',
- }],
- "server": {
- "name": "sdfsdf",
- "flavorRef": "ffdgdf",
- "imageRef": "image-ref",
- },
- },
- },
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
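-        # With no RackConnectV3 pools listed, the loadBalancerId lookup
-        # must fail validation.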
- mock_client().list_load_balancer_pools.return_value = []
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertEqual(
- 'Could not find RackConnectV3 pool with id not integer!: ',
- six.text_type(error))
-
- def test_validate_rcv3_pool_found(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "loadBalancers": [{
- "loadBalancerId": 'pool_exists',
- }],
- "server": {
- "name": "sdfsdf",
- "flavorRef": "ffdgdf",
- "imageRef": "image-ref",
- },
- },
- },
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- mock_client().list_load_balancer_pools.return_value = [
- mock.Mock(id='pool_exists'),
- ]
- self.assertIsNone(asg.validate())
-
- def test_validate_no_lb_specified(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "server": {
- "name": "sdfsdf",
- "flavorRef": "ffdgdf",
- "imageRef": "image-ref",
- },
- },
- },
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- self.assertIsNone(asg.validate())
-
- def test_validate_launch_stack(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_stack",
- "args": {
- "stack": {
- 'template': (
- '''heat_template_version: 2015-10-15
-description: This is a Heat template
-parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
-resources:
- rand:
- type: OS::Heat::RandomString
-'''),
- 'template_url': None,
- 'disable_rollback': False,
- 'environment': {
- 'parameters': {
- 'image':
- 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
- },
- 'resource_registry': {
- 'Heat::InstallConfigAgent': (
- 'https://myhost.com/bootconfig.yaml')
- }
- },
- 'files': {
- 'fileA.yaml': 'Contents of the file',
- 'file:///usr/fileB.yaml': 'Contents of the file'
- },
- 'parameters': {
- 'flavor': '4 GB Performance',
- },
- 'timeout_mins': 30,
- }
- }
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- self.assertIsNone(asg.validate())
-
- def test_validate_launch_server_and_stack(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "server": {
- "name": "sdfsdf",
- "flavorRef": "ffdgdf",
- "imageRef": "image-ref",
- },
- "stack": {
- 'template': (
- '''heat_template_version: 2015-10-15
-description: This is a Heat template
-parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
-resources:
- rand:
- type: OS::Heat::RandomString
-'''),
- 'template_url': None,
- 'disable_rollback': False,
- 'environment': {
- 'parameters': {
- 'image':
- 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
- },
- 'resource_registry': {
- 'Heat::InstallConfigAgent': (
- 'https://myhost.com/bootconfig.yaml')
- }
- },
- 'files': {
- 'fileA.yaml': 'Contents of the file',
- 'file:///usr/fileB.yaml': 'Contents of the file'
- },
- 'parameters': {
- 'flavor': '4 GB Performance',
- },
- 'timeout_mins': 30,
- }
- }
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertIn(
- 'Must provide one of server or stack in launchConfiguration',
- six.text_type(error))
-
- def test_validate_no_launch_server_or_stack(self, mock_client,
- mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {}
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertIn(
- 'Must provide one of server or stack in launchConfiguration',
- six.text_type(error))
-
- def test_validate_stack_template_and_template_url(self, mock_client,
- mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "stack": {
- 'template': (
- '''heat_template_version: 2015-10-15
-description: This is a Heat template
-parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
-resources:
- rand:
- type: OS::Heat::RandomString
-'''),
- 'template_url': 'https://myhost.com/template.yaml',
- }
- }
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertIn(
- 'Must provide one of template or template_url',
- six.text_type(error))
-
- def test_validate_stack_no_template_or_template_url(self, mock_client,
- mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_server",
- "args": {
- "stack": {
- 'disable_rollback': False,
- 'environment': {
- 'parameters': {
- 'image':
- 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
- },
- 'resource_registry': {
- 'Heat::InstallConfigAgent': (
- 'https://myhost.com/bootconfig.yaml')
- }
- },
- 'files': {
- 'fileA.yaml': 'Contents of the file',
- 'file:///usr/fileB.yaml': 'Contents of the file'
- },
- 'parameters': {
- 'flavor': '4 GB Performance',
- },
- 'timeout_mins': 30,
- }
- }
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertIn(
- 'Must provide one of template or template_url',
- six.text_type(error))
-
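-    # A garbage prefix makes the inline template unparseable, so
-    # validation must surface the template-loading error.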
- def test_validate_invalid_template(self, mock_client, mock_plugin):
- asg_properties = {
- "groupConfiguration": {
- "name": "My Group",
- "cooldown": 60,
- "minEntities": 1,
- "maxEntities": 25,
- "metadata": {
- "group": "metadata",
- },
- },
- "launchConfiguration": {
- "type": "launch_stack",
- "args": {
- "stack": {
- 'template': (
- '''SJDADKJAJKLSheat_template_version: 2015-10-15
-description: This is a Heat template
-parameters:
- image:
- default: cirros-0.3.4-x86_64-uec
- type: string
- flavor:
- default: m1.tiny
- type: string
-resources:
- rand:
- type: OS::Heat::RandomString
-'''),
- 'template_url': None,
- 'disable_rollback': False,
- 'environment': {'Foo': 'Bar'},
- 'files': {
- 'fileA.yaml': 'Contents of the file',
- 'file:///usr/fileB.yaml': 'Contents of the file'
- },
- 'parameters': {
- 'flavor': '4 GB Performance',
- },
- 'timeout_mins': 30,
- }
- }
- }
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", auto_scale.Group, properties=asg_properties)
- asg = auto_scale.Group("test", rsrcdef, self.mockstack)
-
- error = self.assertRaises(
- exception.StackValidationFailed, asg.validate)
- self.assertIn(
- 'Encountered error while loading template:',
- six.text_type(error))
diff --git a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py b/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
deleted file mode 100644
index 083753503..000000000
--- a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
+++ /dev/null
@@ -1,2202 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import copy
-import json
-import uuid
-
-import mock
-import mox
-import six
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine import resource
-from heat.engine import rsrc_defn
-from heat.engine import scheduler
-from heat.tests import common
-from heat.tests import utils
-
-from ..resources import cloud_loadbalancer as lb # noqa
-
-# The following fakes are for pyrax
-
-
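-# Test-only certificate and private key pair, used solely as
-# SSL-termination fixtures in the tests below.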
-cert = """\n-----BEGIN CERTIFICATE-----
-MIIFBjCCAu4CCQDWdcR5LY/+/jANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB
-VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0
-cyBQdHkgTHRkMB4XDTE0MTAxNjE3MDYxNVoXDTE1MTAxNjE3MDYxNVowRTELMAkG
-A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0
-IFdpZGdpdHMgUHR5IEx0ZDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
-AMm5NcP0tMKHblT6Ud1k8TxZ9/8uOHwUNPbvFsvSyCupj0J0vGCTjbuC2I5T/CXR
-tnLEIt/EarlNAqcjbDCWtSyEKs3zDmmkreoIDEa8pyAQ2ycsCXGMxDN97F3/wlLZ
-agUNM0FwGHLZWBg62bM6l+bpTUcX0PqSyv/aVMhJ8EPDX0Dx1RYsVwUzIe/HWC7x
-vCmtDApAp1Fwq7AwlRaKU17sGwPWJ8+I8PyouBdqNuslHm7LQ0XvBA5DfkQA6feB
-ZeJIyOtctM9WFWQI5fKOsyt5P306B3Zztw9VZLAmZ8qHex+R1WY1zXxDAwKEQz/X
-8bRqMA/VU8OxJcK0AmY/1v/TFmAlRh2XBCIc+5UGtCcftWvZJAsKur8Hg5pPluGv
-ptyqSgSsSKtOVWkyTANP1LyOkpBA8Kmkeo2CKXu1SCFypY5Q6E+Fy8Y8RaHJPvzR
-NHcm1tkBvHOKyRso6FjvxuJEyIC9EyUK010nwQm7Qui11VgCSHBoaKVvkIbFfQdK
-aCes0oQO5dqY0+fC/IFDhrxlvSd2Wk7KjuNjNu9kVN9Ama2pRTxhYKaN+GsHfoL7
-ra6G9HjbUVULAdjCko3zOKEUzFLLf1VZYk7hDhyv9kovk0b8sr5WowxW7+9Wy0NK
-WL5f2QgVCcoHw9bGhyuYQCdBfztNmKOWe9pGj6bQAx4pAgMBAAEwDQYJKoZIhvcN
-AQEFBQADggIBALFSj3G2TEL/UWtNcPeY2fbxSGBrboFx3ur8+zTkdZzvfC8H9/UK
-w0aRH0rK4+lKYDqF6A9bUHP17DaJm1lF9In38VVMOuur0ehUIn1S2U3OvlDLN68S
-p5D4wGKMcUfUQ6pzhSKJCMvGX561TKHCc5fZhPruy75Xq2DcwJENE189foKLFvJs
-ca4sIARqP6v1vfARcfH5leSsdIq8hy6VfL0BRATXfNHZh4SNbyDJYYTxrEUPHYXW
-pzW6TziZXYNMG2ZRdHF/mDJuFzw2EklOrPC9MySCZv2i9swnqyuwNYh/SAMhodTv
-ZDGy4nbjWNe5BflTMBceh45VpyTcnQulFhZQFwP79fK10BoDrOc1mEefhIqT+fPI
-LJepLOf7CSXtYBcWbmMCLHNh+PrlCiA1QMTyd/AC1vvoiyCbs3M419XbXcBSDEh8
-tACplmhf6z1vDkElWiDr8y0kujJ/Gie24iLTun6oHG+f+o6bbQ9w196T0olLcGx0
-oAYL0Olqli6cWHhraVAzZ5t5PH4X9TiESuQ+PMjqGImCIUscXY4objdnB5dfPHoz
-eF5whPl36/GK8HUixCibkCyqEOBBuNqhOz7nVLM0eg5L+TE5coizEBagxVCovYSj
-fQ9zkIgaC5oeH6L0C1FFG1vRNSWokheBk14ztVoJCJyFr6p0/6pD7SeR
------END CERTIFICATE-----\n"""
-
-private_key = """\n-----BEGIN PRIVATE KEY-----
-MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDJuTXD9LTCh25U
-+lHdZPE8Wff/Ljh8FDT27xbL0sgrqY9CdLxgk427gtiOU/wl0bZyxCLfxGq5TQKn
-I2wwlrUshCrN8w5ppK3qCAxGvKcgENsnLAlxjMQzfexd/8JS2WoFDTNBcBhy2VgY
-OtmzOpfm6U1HF9D6ksr/2lTISfBDw19A8dUWLFcFMyHvx1gu8bwprQwKQKdRcKuw
-MJUWilNe7BsD1ifPiPD8qLgXajbrJR5uy0NF7wQOQ35EAOn3gWXiSMjrXLTPVhVk
-COXyjrMreT99Ogd2c7cPVWSwJmfKh3sfkdVmNc18QwMChEM/1/G0ajAP1VPDsSXC
-tAJmP9b/0xZgJUYdlwQiHPuVBrQnH7Vr2SQLCrq/B4OaT5bhr6bcqkoErEirTlVp
-MkwDT9S8jpKQQPCppHqNgil7tUghcqWOUOhPhcvGPEWhyT780TR3JtbZAbxziskb
-KOhY78biRMiAvRMlCtNdJ8EJu0LotdVYAkhwaGilb5CGxX0HSmgnrNKEDuXamNPn
-wvyBQ4a8Zb0ndlpOyo7jYzbvZFTfQJmtqUU8YWCmjfhrB36C+62uhvR421FVCwHY
-wpKN8zihFMxSy39VWWJO4Q4cr/ZKL5NG/LK+VqMMVu/vVstDSli+X9kIFQnKB8PW
-xocrmEAnQX87TZijlnvaRo+m0AMeKQIDAQABAoICAA8DuBrDxgiMqAuvLhS6hLIn
-SCw4NoAVyPNwTFQTdk65qi4aHkNZ+DyyuoetfKEcAOZ97tKU/hSYxM/H9S+QqB+O
-HtmBc9stJLy8qJ1DQXVDi+xYfMN05M2oW8WLWd1szVVe7Ce8vjUeNE5pYvbSL6hC
-STw3a5ibAH0WtSTLTBTfH+HnniKuXjPG4InGXqvv1j+L38+LjGilaEIO+6nX1ejE
-ziX09LWfzcAglsM3ZqsN8jvw6Sr1ZWniYC2Tm9aOTRUQsdPC7LpZ//GYL/Vj5bYg
-qjcZ8KBCcKe1hW8PDL6oYuOwqR+YdZkAK+MuEQtZeWYiWT10dW2la9gYKe2OZuQ1
-7q3zZ6zLP+XP+0N7DRMTTuk2gurBVX7VldzIzvjmW8X+8Q5QO+EAqKr2yordK3S1
-uYcKmyL4Nd6rSFjRo0zSqHMNOyKt3b1r3m/eR2W623rT5uTjgNYpiwCNxnxmcjpK
-Sq7JzZKz9NLbEKQWsP9gQ3G6pp3XfLtoOHEDkSKMmQxd8mzK6Ja/9iC+JGqRTJN+
-STe1vL9L2DC7GnjOH1h2TwLoLtQWSGebf/GBxju0e5pAL0UYWBNjAwcpOoRU9J5J
-y9E7sNbbXTmK2rg3B/5VKGQckBWfurg7CjAmHGgz9xxceJQLKvT1O5zHZc+v4TVB
-XDZjtz8L2k3wFLDynDY5AoIBAQDm2fFgx4vk+gRFXPoLNN34Jw2fT+xuwD/H7K0e
-0Cas0NfyNil/Kbp+rhMHuVXTt86BIY+z8GO4wwn+YdDgihBwobAh2G9T/P6wNm+Q
-NcIeRioml8V/CP7lOQONQJ6sLTRYnNLfB96uMFe+13DO/PjFybee5VflfBUrJK1M
-DqRLwm9wEIf5p0CWYI/ZJaDNN71B09BB/jdT/e7Ro1hXHlq3W4tKqRDPfuUqwy3H
-ocYQ1SUk3oFdSiYFd6PijNkfTnrtyToa0xUL9uGL+De1LfgV+uvqkOduQqnpm/5+
-XQC1qbTUjq+4WEsuPjYf2E0WAVFGzwzWcdb0LnMIUJHwPvpLAoIBAQDfsvCZlcFM
-nGBk1zUnV3+21CPK+5+X3zLHr/4otQHlGMFL6ZiQManvKMX6a/cT3rG+LvECcXGD
-jSsTu7JIt9l8VTpbPaS76htTmQYaAZERitBx1C8zDMuI2O4bjFLUGUX73RyTZdRm
-G68IX+7Q7SL8zr/fHjcnk+3yj0L1soAVPC7lY3se7vQ/SCre97E+noP5yOhrpnRt
-dij7NYy79xcvUZfc/z0//Ia4JSCcIvv2HO7JZIPzUCVO4sjbUOGsgR9pwwQkwYeP
-b5P0MVaPgFnOgo/rz6Uqe+LpeY83SUwc2q8W8bskzTLZEnwSV5bxCY+gIn9KCZSG
-8QxuftgIiQDbAoIBAQDQ2oTC5kXulzOd/YxK7z2S8OImLAzf9ha+LaZCplcXKqr0
-e4P3hC0xxxN4fXjk3vp5YX+9b9MIqYw1FRIA02gkPmQ3erTd65oQmm88rSY+dYRU
-/iKz19OkVnycIsZrR0qAkQFGvrv8I8h+5DMvUTdQ2jrCCwQGnsgYDEqs8OI7mGFx
-pcMfXu3UHvCFqMFeaPtUvuk/i1tLJgYWrA2UY+X21V+j4GlREKEMmyCj5/xl5jCA
-tr2bRSY49BDVOlCFPl+BGfjzo9z6whU0qRDdXgWA/U7LHOYEn1NSAsuwTzwBHtR3
-KdBYm6kI4Ufeb7buHasGwPQAX2X17MAt2ZbvIEsZAoIBAQC4g5dzh5PGhmH4K48b
-YU/l1TukzUIJekAfd+ozV4I1nuKppAeEQILD0yTh9zX4vMJtdbiz5DDWapWylCpt
-UsBgjsgwxDriCSr7HIhs4QfwqUhf67325MHpoc1dCbS0YBhatDpC1kaI5qLMTJzm
-1gL69epLtleWHK2zWjnIAbEmUtr3uMOwczciD3vVKAeZ+BQx72bOjKESPNl2w+fO
-jvQfwrR5xEqYQco5j95DC5Q6oAjSM0enZV8wn10/kYpjyKnJieMcEkmnpUgrrpqQ
-iTUKYqUlw8OftEopfGwGFT5junmbek57/4nGhTmzw22sac9/LZVC034ghClV5uh4
-udDrAoIBAQCJHfBPJmJMT/WtSATTceVDgZiyezWNgH2yLJMqDP6sEuImnLAg2L9M
-Yc6LqMcHLj7CyXfy2AEAuYTZwXFSRmVKl6Ycad7sS/hIL1ykvDveRU9VNImexDBq
-AJR4GKr6jbRZnBztnRYZTsGA+TcrFc6SwdSPXgz7JQT9uw+JkhLi59m141XBdeRc
-NQ/LFgOaxjvRUID81izQaYEyADId7asy+2QVazMDafuALJ23WSUMSXajCXaC6/7N
-53RWrOAb+kFRgjuHM8pQkpgnY/Ds0MZxpakFw3Y7PAEL99xyYdR+rE3JOMjPlgr0
-LpTt0Xs1OFZxaNpolW5Qis4os7UmmIRV
------END PRIVATE KEY-----\n"""
-
-
-class FakeException(Exception):
- pass
-
-
-class FakeClient(object):
- user_agent = "Fake"
- USER_AGENT = "Fake"
-
-
-class FakeManager(object):
- api = FakeClient()
-
- def list(self):
- pass
-
- def get(self, item):
- pass
-
- def delete(self, item):
- pass
-
- def create(self, *args, **kwargs):
- pass
-
- def find(self, *args, **kwargs):
- pass
-
- def action(self, item, action_type, body=None):
- pass
-
-
-class FakeLoadBalancerManager(object):
- def __init__(self, api=None, *args, **kwargs):
- pass
-
- def set_content_caching(self, *args, **kwargs):
- pass
-
-
-class FakeNode(object):
- def __init__(self, address=None, port=None, condition=None, weight=None,
- status=None, parent=None, type=None, id=None):
- if not (address and port):
- # This mimics the check that pyrax does on Node instantiation
- raise TypeError("You must include an address and "
- "a port when creating a node.")
- self.address = address
- self.port = port
- self.condition = condition
- self.weight = weight
- self.status = status
- self.parent = parent
- self.type = type
- self.id = id
-
- def __eq__(self, other):
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def update(self):
- pass
-
- def delete(self):
- pass
-
-
-class FakeVirtualIP(object):
- def __init__(self, address=None, port=None, condition=None,
- ipVersion=None, type=None, id=None):
- self.address = address
- self.port = port
- self.condition = condition
- self.ipVersion = ipVersion
- self.type = type
- self.id = id
- self.ip_version = ipVersion
-
- def __eq__(self, other):
- return self.__dict__ == other.__dict__
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
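-# Minimal stand-in for the pyrax load balancer client: just enough to
-# expose the Node and VirtualIP factories used by the resource code.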
-class FakeLoadBalancerClient(object):
- def __init__(self, *args, **kwargs):
- self.Node = FakeNode
- self.VirtualIP = FakeVirtualIP
-
- def get(self, *args, **kwargs):
- pass
-
- def create(self, *args, **kwargs):
- pass
-
-
-class FakeLoadBalancer(object):
-    def __init__(self, name=None, info=None, *args, **kwargs):
-        # name/info are accepted for pyrax signature compatibility but
-        # unused; tests set the attributes they need directly.
- self.id = uuid.uuid4()
- self.manager = FakeLoadBalancerManager()
- self.Node = FakeNode
- self.VirtualIP = FakeVirtualIP
- self.nodes = []
- self.algorithm = "ROUND_ROBIN"
- self.session_persistence = "HTTP_COOKIE"
- self.connection_logging = False
- self.timeout = None
- self.httpsRedirect = False
- self.protocol = None
- self.port = None
- self.name = None
- self.halfClosed = None
- self.content_caching = False
-
- def get(self, *args, **kwargs):
- pass
-
- def add_nodes(self, *args, **kwargs):
- pass
-
- def add_ssl_termination(self, *args, **kwargs):
- pass
-
- def set_error_page(self, *args, **kwargs):
- pass
-
- def clear_error_page(self, *args, **kwargs):
- pass
-
- def add_access_list(self, *args, **kwargs):
- pass
-
- def update(self, *args, **kwargs):
- pass
-
- def add_health_monitor(self, *args, **kwargs):
- pass
-
- def delete_health_monitor(self, *args, **kwargs):
- pass
-
- def delete_ssl_termination(self, *args, **kwargs):
- pass
-
- def set_metadata(self, *args, **kwargs):
- pass
-
- def delete_metadata(self, *args, **kwargs):
- pass
-
- def add_connection_throttle(self, *args, **kwargs):
- pass
-
- def delete_connection_throttle(self, *args, **kwargs):
- pass
-
- def delete(self, *args, **kwargs):
- pass
-
- def get_health_monitor(self, *args, **kwargs):
- return {}
-
- def get_metadata(self, *args, **kwargs):
- return {}
-
- def get_error_page(self, *args, **kwargs):
- pass
-
- def get_connection_throttle(self, *args, **kwargs):
- pass
-
- def get_ssl_termination(self, *args, **kwargs):
- pass
-
- def get_access_list(self, *args, **kwargs):
- pass
-
-
-class LoadBalancerWithFakeClient(lb.CloudLoadBalancer):
- def cloud_lb(self):
- return FakeLoadBalancerClient()
-
-
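-# Map the resource type to the fake-client subclass so no test ever
-# talks to a real endpoint.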
-def override_resource():
- return {
- 'Rackspace::Cloud::LoadBalancer': LoadBalancerWithFakeClient
- }
-
-
-class LoadBalancerTest(common.HeatTestCase):
-
- def setUp(self):
- super(LoadBalancerTest, self).setUp()
-
- self.lb_props = {
- "name": "test-clb",
- "nodes": [{"addresses": ["166.78.103.141"],
- "port": 80,
- "condition": "ENABLED"}],
- "protocol": "HTTP",
- "port": 80,
- "virtualIps": [
- {"type": "PUBLIC", "ipVersion": "IPV6"}],
- "algorithm": 'LEAST_CONNECTIONS',
- "connectionThrottle": {'maxConnectionRate': 1000},
- 'timeout': 110,
- 'contentCaching': 'DISABLED'
- }
-
- self.lb_template = {
- "AWSTemplateFormatVersion": "2010-09-09",
- "Description": "fawef",
- "Resources": {
- self._get_lb_resource_name(): {
- "Type": "Rackspace::Cloud::LoadBalancer",
- "Properties": self.lb_props,
- }
- }
- }
-
- self.lb_name = 'test-clb'
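-        # The exact keyword arguments the stubbed clb.create() call is
-        # expected to receive; unset options default to None.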
- self.expected_body = {
- "nodes": [FakeNode(address=u"166.78.103.141", port=80,
- condition=u"ENABLED", type=u"PRIMARY",
- weight=1)],
- "protocol": u'HTTP',
- "port": 80,
- "virtual_ips": [FakeVirtualIP(type=u"PUBLIC", ipVersion=u"IPV6")],
- "algorithm": u'LEAST_CONNECTIONS',
- "connectionThrottle": {'maxConnectionRate': 1000,
- 'maxConnections': None,
- 'rateInterval': None,
- 'minConnections': None},
- "connectionLogging": None,
- "halfClosed": None,
- "healthMonitor": None,
- "metadata": None,
- "sessionPersistence": None,
- "timeout": 110,
- "httpsRedirect": False
- }
-
- lb.resource_mapping = override_resource
- resource._register_class("Rackspace::Cloud::LoadBalancer",
- LoadBalancerWithFakeClient)
-
- def _get_lb_resource_name(self):
- return "lb-" + str(uuid.uuid4())
-
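-    # Hand out deep copies of the shared fixtures so tests can mutate
-    # them freely without leaking state into each other.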
- def __getattribute__(self, name):
- if name == 'expected_body' or name == 'lb_template':
- return copy.deepcopy(super(LoadBalancerTest, self)
- .__getattribute__(name))
- return super(LoadBalancerTest, self).__getattribute__(name)
-
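-    # Build the resource against the fake client and record the mox
-    # expectations for clb.create()/clb.get(); callers ReplayAll().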
- def _mock_create(self, tmpl, stack, resource_name, lb_name, lb_body):
- resource_defns = tmpl.resource_definitions(stack)
- rsrc = LoadBalancerWithFakeClient(resource_name,
- resource_defns[resource_name],
- stack)
-
- fake_lb = FakeLoadBalancer(name=lb_name)
- fake_lb.status = 'ACTIVE'
- fake_lb.resource_id = 1234
-
- self.m.StubOutWithMock(rsrc.clb, 'create')
- rsrc.clb.create(lb_name, **lb_body).AndReturn(fake_lb)
-
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(
- fake_lb)
-
- return (rsrc, fake_lb)
-
- def _get_first_resource_name(self, templ):
- return next(k for k in templ['Resources'])
-
- def _mock_loadbalancer(self, lb_template, expected_name, expected_body):
- t = template_format.parse(json.dumps(lb_template))
- self.stack = utils.parse_stack(t, stack_name=utils.random_name())
-
-        rsrc, fake_lb = self._mock_create(
-            self.stack.t, self.stack,
-            self._get_first_resource_name(lb_template),
-            expected_name, expected_body)
- return (rsrc, fake_lb)
-
- def _set_template(self, templ, **kwargs):
- for k, v in six.iteritems(kwargs):
- templ['Resources'][self._get_first_resource_name(templ)][
- 'Properties'][k] = v
- return templ
-
- def _set_expected(self, expected, **kwargs):
- for k, v in six.iteritems(kwargs):
- expected[k] = v
- return expected
-
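-    # _process_nodes expands each multi-address node into one node per
-    # address and drops nodes that have no addresses.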
- def test_process_node(self):
- nodes = [{'addresses': ['1234'], 'port': 80, 'enabled': True},
- {'addresses': ['4567', '8901', '8903'], 'port': 80,
- 'enabled': True},
- {'addresses': [], 'port': 80, 'enabled': True}]
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- expected_nodes = [{'address': '1234', 'port': 80, 'enabled': True},
- {'address': '4567', 'port': 80, 'enabled': True},
- {'address': '8901', 'port': 80, 'enabled': True},
- {'address': '8903', 'port': 80, 'enabled': True}]
- self.assertEqual(expected_nodes, list(rsrc._process_nodes(nodes)))
-
- def test_nodeless(self):
- """It's possible to create a LoadBalancer resource with no nodes."""
- template = self._set_template(self.lb_template,
- nodes=[])
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['nodes'] = []
- rsrc, fake_lb = self._mock_loadbalancer(
- template, self.lb_name, expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_alter_properties(self):
-        # Exercise the optional properties: session persistence,
-        # connection logging and metadata.
- template = self._set_template(self.lb_template,
- sessionPersistence='HTTP_COOKIE',
- connectionLogging=True,
- metadata={'yolo': 'heeyyy_gurl'})
-
- expected = self._set_expected(self.expected_body,
- sessionPersistence={
- 'persistenceType': 'HTTP_COOKIE'},
- connectionLogging={'enabled': True},
- metadata=[
- {'key': 'yolo',
- 'value': 'heeyyy_gurl'}])
-
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_validate_vip(self):
- snippet = {
- "nodes": [],
- "protocol": 'HTTP',
- "port": 80,
- "halfClosed": None,
- "algorithm": u'LEAST_CONNECTIONS',
- "virtualIps": [{"id": "1234"}]
- }
- stack = mock.Mock()
- stack.db_resource_get.return_value = None
- stack.has_cache_data.return_value = False
- # happy path
- resdef = rsrc_defn.ResourceDefinition("testvip",
- lb.CloudLoadBalancer,
- properties=snippet)
- rsrc = lb.CloudLoadBalancer("testvip", resdef, stack)
- self.assertIsNone(rsrc.validate())
- # make sure the vip id prop is exclusive
- snippet["virtualIps"][0]["type"] = "PUBLIC"
- exc = self.assertRaises(exception.StackValidationFailed,
- rsrc.validate)
- self.assertIn("Cannot specify type or version", str(exc))
- # make sure you have to specify type and version if no id
- snippet["virtualIps"] = [{}]
- exc = self.assertRaises(exception.StackValidationFailed,
- rsrc.validate)
- self.assertIn("Must specify VIP type and version", str(exc))
-
- def test_validate_half_closed(self):
- # test failure (invalid protocol)
- template = self._set_template(self.lb_template, halfClosed=True)
- expected = self._set_expected(self.expected_body, halfClosed=True)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- exc = self.assertRaises(exception.StackValidationFailed,
- rsrc.validate)
- self.assertIn('The halfClosed property is only available for the TCP'
- ' or TCP_CLIENT_FIRST protocols', str(exc))
-
- # test TCP protocol
- template = self._set_template(template, protocol='TCP')
- expected = self._set_expected(expected, protocol='TCP')
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.assertIsNone(rsrc.validate())
-
- # test TCP_CLIENT_FIRST protocol
- template = self._set_template(template,
- protocol='TCP_CLIENT_FIRST')
- expected = self._set_expected(expected,
- protocol='TCP_CLIENT_FIRST')
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.assertIsNone(rsrc.validate())
-
- def test_validate_health_monitor(self):
- # test connect success
- health_monitor = {
- 'type': 'CONNECT',
- 'attemptsBeforeDeactivation': 1,
- 'delay': 1,
- 'timeout': 1
- }
- template = self._set_template(self.lb_template,
- healthMonitor=health_monitor)
- expected = self._set_expected(self.expected_body,
- healthMonitor=health_monitor)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
-
- self.assertIsNone(rsrc.validate())
-
- # test connect failure
- # bodyRegex is only valid for type 'HTTP(S)'
- health_monitor['bodyRegex'] = 'dfawefawe'
- template = self._set_template(template,
- healthMonitor=health_monitor)
- expected = self._set_expected(expected,
- healthMonitor=health_monitor)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- exc = self.assertRaises(exception.StackValidationFailed,
- rsrc.validate)
- self.assertIn('Unknown Property bodyRegex', str(exc))
-
- # test http fields
- health_monitor['type'] = 'HTTP'
- health_monitor['bodyRegex'] = 'bodyRegex'
- health_monitor['statusRegex'] = 'statusRegex'
- health_monitor['hostHeader'] = 'hostHeader'
- health_monitor['path'] = 'path'
-
- template = self._set_template(template,
- healthMonitor=health_monitor)
- expected = self._set_expected(expected,
- healthMonitor=health_monitor)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.assertIsNone(rsrc.validate())
-
- def test_validate_ssl_termination(self):
- ssl_termination = {
- 'privatekey': 'ewfawe',
- 'intermediateCertificate': 'fwaefawe',
- 'secureTrafficOnly': True
- }
-
-        # Enabling SSL termination without the required certificate
-        # must fail validation.
- template = self._set_template(self.lb_template,
- sslTermination=ssl_termination)
- expected = self._set_expected(self.expected_body,
- sslTermination=ssl_termination)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
-
-        exc = self.assertRaises(exception.StackValidationFailed,
-                                rsrc.validate)
- self.assertIn("Property certificate not assigned", six.text_type(exc))
-
- ssl_termination['certificate'] = 'dfaewfwef'
- template = self._set_template(template,
- sslTermination=ssl_termination)
- expected = self._set_expected(expected,
- sslTermination=ssl_termination)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.assertIsNone(rsrc.validate())
-
- def test_ssl_termination_unstripped_certificates(self):
- ssl_termination_template = {
- 'securePort': 443,
- 'privatekey': 'afwefawe',
- 'certificate': ' \nfawefwea\n ',
- 'intermediateCertificate': "\n\nintermediate_certificate\n",
- 'secureTrafficOnly': False
- }
- ssl_termination_api = copy.deepcopy(ssl_termination_template)
-
- template = self._set_template(self.lb_template,
- sslTermination=ssl_termination_template)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443,
- 'certificate': 'fawefwea',
- 'intermediateCertificate': "intermediate_certificate",
- 'secureTrafficOnly': False,
- 'enabled': True,
- })
-
- self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
- fake_lb.add_ssl_termination(**ssl_termination_api)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_ssl_termination_intermediateCertificate_None(self):
- ssl_termination_template = {
- 'securePort': 443,
- 'privatekey': 'afwefawe',
- 'certificate': ' \nfawefwea\n ',
- 'intermediateCertificate': None,
- 'secureTrafficOnly': False
- }
-
- template = self._set_template(self.lb_template,
- sslTermination=ssl_termination_template)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443,
- 'certificate': 'fawefwea',
- 'secureTrafficOnly': False,
- 'enabled': True,
- })
-
- self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
- add_ssl_termination_args = {
- 'securePort': 443,
- 'privatekey': 'afwefawe',
- 'certificate': ' \nfawefwea\n ',
- 'intermediateCertificate': '',
- 'secureTrafficOnly': False
- }
- fake_lb.add_ssl_termination(**add_ssl_termination_args)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_post_creation_access_list(self):
- access_list = [{"address": '192.168.1.1/0',
- 'type': 'ALLOW'},
- {'address': '172.165.3.43',
- 'type': 'DENY'}]
- api_access_list = [{"address": '192.168.1.1/0', 'id': 1234,
- 'type': 'ALLOW'},
- {'address': '172.165.3.43', 'id': 3422,
- 'type': 'DENY'}]
-
- template = self._set_template(self.lb_template,
- accessList=access_list)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_access_list')
- fake_lb.get_access_list().AndReturn([])
- fake_lb.get_access_list().AndReturn(api_access_list)
-
- self.m.StubOutWithMock(fake_lb, 'add_access_list')
- fake_lb.add_access_list(access_list)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_ref_id(self):
- """The Reference ID of the resource is the resource ID."""
- template = self._set_template(self.lb_template)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- self.assertEqual(rsrc.resource_id, rsrc.FnGetRefId())
-
- def test_post_creation_error_page(self):
- error_page = "REALLY BIG ERROR"
-
- template = self._set_template(self.lb_template,
- errorPage=error_page)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_error_page')
- fake_lb.get_error_page().AndReturn({u'errorpage': {u'content': u''}})
- fake_lb.get_error_page().AndReturn(
- {u'errorpage': {u'content': error_page}})
-
- self.m.StubOutWithMock(fake_lb, 'set_error_page')
- fake_lb.set_error_page(error_page)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_post_creation_ssl_termination(self):
- ssl_termination_template = {
- 'securePort': 443,
- 'privatekey': 'afwefawe',
- 'certificate': 'fawefwea',
- 'intermediateCertificate': "intermediate_certificate",
- 'secureTrafficOnly': False
- }
- ssl_termination_api = copy.deepcopy(ssl_termination_template)
-
- template = self._set_template(self.lb_template,
- sslTermination=ssl_termination_template)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443,
- 'certificate': 'fawefwea',
- 'intermediateCertificate': "intermediate_certificate",
- 'secureTrafficOnly': False,
- 'enabled': True,
- })
-
- self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
- fake_lb.add_ssl_termination(**ssl_termination_api)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_post_creation_content_caching(self):
- template = self._set_template(self.lb_template,
- contentCaching='ENABLED')
- rsrc = self._mock_loadbalancer(template, self.lb_name,
- self.expected_body)[0]
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
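-    # check() mirrors the live load balancer status: ACTIVE completes,
-    # any other status (or a missing LB) fails the CHECK action.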
- def test_check(self):
- stack = mock.Mock()
- stack.db_resource_get.return_value = None
- stack.has_cache_data.return_value = False
- resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
- loadbalancer = lb.CloudLoadBalancer("test", resdef, stack)
- loadbalancer._add_event = mock.Mock()
- mock_cloud_lb = mock.Mock()
- mock_get = mock.Mock(return_value=mock_cloud_lb)
- loadbalancer.clb.get = mock_get
-
- mock_cloud_lb.status = 'ACTIVE'
- scheduler.TaskRunner(loadbalancer.check)()
- self.assertEqual('CHECK', loadbalancer.action)
- self.assertEqual('COMPLETE', loadbalancer.status)
-
- mock_cloud_lb.status = 'FOOBAR'
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(loadbalancer.check))
- self.assertEqual('CHECK', loadbalancer.action)
- self.assertEqual('FAILED', loadbalancer.status)
- self.assertIn('FOOBAR', str(exc))
-
- mock_get.side_effect = lb.NotFound('boom')
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(loadbalancer.check))
- self.assertEqual('CHECK', loadbalancer.action)
- self.assertEqual('FAILED', loadbalancer.status)
- self.assertIn('boom', str(exc))
-
- def test_update_add_node_by_address(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- fake_lb.nodes = self.expected_body['nodes']
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- expected_ip = '172.168.1.4'
- props['nodes'] = [
- {"addresses": ["166.78.103.141"],
- "port": 80,
- "condition": "ENABLED",
- "type": "PRIMARY",
- "weight": 1},
- {"addresses": [expected_ip],
- "port": 80,
- "condition": "ENABLED",
- "type": "PRIMARY",
- "weight": 1}]
- update_template = rsrc.t.freeze(properties=props)
-
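-        # Drop the create-phase stubs and record fresh expectations for
-        # the update call.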
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.nodes = [
- FakeNode(address=u"172.168.1.4", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"166.78.103.141", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- ]
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'add_nodes')
- fake_lb.add_nodes([
- fake_lb.Node(address=expected_ip,
- port=80,
- condition='ENABLED',
- type="PRIMARY", weight=1)])
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_resolve_attr_noid(self):
- stack = mock.Mock()
- stack.db_resource_get.return_value = None
- stack.has_cache_data.return_value = False
- resdef = mock.Mock(spec=rsrc_defn.ResourceDefinition)
- lbres = lb.CloudLoadBalancer("test", resdef, stack)
- self.assertIsNone(lbres._resolve_attribute("PublicIp"))
-
- def test_resolve_attr_virtualips(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- fake_lb.virtual_ips = [FakeVirtualIP(address='1.2.3.4',
- type='PUBLIC',
- ipVersion="IPv6",
- id='test-id')]
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- expected = [{
- 'ip_version': 'IPv6',
- 'type': 'PUBLIC',
- 'id': 'test-id',
- 'address': '1.2.3.4'}]
- self.assertEqual(expected, rsrc._resolve_attribute("virtualIps"))
- self.m.VerifyAll()
-
- def test_update_nodes_immutable(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- current_nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1)
- ]
- fake_lb.nodes = current_nodes
- fake_lb.tracker = "fake_lb"
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- expected_ip = '4.4.4.4'
- props['nodes'] = [
- {"addresses": ["1.1.1.1"], "port": 80, "condition": "ENABLED",
- "type": "PRIMARY", "weight": 1},
- {"addresses": ["2.2.2.2"], "port": 80, "condition": "DISABLED",
- "type": "PRIMARY", "weight": 1},
- {"addresses": [expected_ip], "port": 80, "condition": "ENABLED",
- "type": "PRIMARY", "weight": 1}
- ]
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.status = "PENDING_UPDATE"
- fake_lb1.tracker = "fake_lb1"
-
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # ACTIVE
-
- # Add node `expected_ip`
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
-
- fake_lb2 = copy.deepcopy(fake_lb1)
- fake_lb2.status = "ACTIVE"
- fake_lb2.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- ]
- fake_lb2.tracker = "fake_lb2"
-
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2) # ACTIVE
-
- # Delete node 3.3.3.3
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
-
- fake_lb3 = copy.deepcopy(fake_lb2)
- fake_lb3.status = "ACTIVE"
- fake_lb3.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1)
- ]
- fake_lb3.tracker = "fake_lb3"
-
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3) # ACTIVE
-
- # Update node 2.2.2.2
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # PENDING_UPDATE
-
- fake_lb4 = copy.deepcopy(fake_lb3)
- fake_lb4.status = "ACTIVE"
- fake_lb4.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"DISABLED",
- type="PRIMARY", weight=1),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
- type="PRIMARY", weight=1)
- ]
- fake_lb4.tracker = "fake_lb4"
-
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb4) # ACTIVE
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_pending_update_status(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['name'] = "updated_name"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.name = "updated_name"
- fake_lb1.status = "PENDING_UPDATE" # lb is immutable
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.name = "updated_name"
- fake_lb2.status = "ACTIVE"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_immutable_exception(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['name'] = "updated_name"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # initial iteration
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb) # immutable
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.name = "updated_name"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1) # after update
-
- self.m.StubOutWithMock(fake_lb, 'update')
- msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and "
- "is considered immutable." % rsrc.resource_id)
- fake_lb.update(name="updated_name").AndRaise(Exception(msg))
- fake_lb.update(name="updated_name").AndReturn(None)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_create_immutable_exception(self):
- access_list = [{"address": '192.168.1.1/0',
- 'type': 'ALLOW'},
- {'address': '172.165.3.43',
- 'type': 'DENY'}]
-
- template = self._set_template(self.lb_template,
- accessList=access_list)
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.StubOutWithMock(fake_lb, 'get_access_list')
- fake_lb.get_access_list().AndReturn({})
- fake_lb.get_access_list().AndReturn({})
- fake_lb.get_access_list().AndReturn(access_list)
-
- self.m.StubOutWithMock(fake_lb, 'add_access_list')
- msg = ("Load Balancer '%s' has a status of 'PENDING_UPDATE' and "
- "is considered immutable." % rsrc.resource_id)
- fake_lb.add_access_list(access_list).AndRaise(Exception(msg))
- fake_lb.add_access_list(access_list)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- def test_update_lb_name(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['name'] = "updated_name"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.name = "updated_name"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(name="updated_name")
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_multiple(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['name'] = "updated_name"
- props['algorithm'] = "RANDOM"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.name = "updated_name"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.algorithm = "RANDOM"
- fake_lb2.name = "updated_name"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(name="updated_name", algorithm="RANDOM")
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_algorithm(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['algorithm'] = "RANDOM"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.algorithm = "ROUND_ROBIN"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb1, 'update')
- fake_lb1.update(algorithm="RANDOM")
-
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.algorithm = "RANDOM"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_protocol(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['protocol'] = "IMAPS"
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.protocol = "IMAPS"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(protocol="IMAPS")
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_redirect(self):
- template = self._set_template(
- self.lb_template, protocol="HTTPS")
-
- expected = self._set_expected(
- self.expected_body, protocol="HTTPS")
-
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(template['Resources'][rsrc.name]['Properties'])
- props['httpsRedirect'] = True
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.httpsRedirect = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(httpsRedirect=True)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_lb_redirect_https(self):
- template = self._set_template(
- self.lb_template, protocol="HTTPS", httpsRedirect=True)
-
- expected = self._set_expected(
- self.expected_body, protocol="HTTPS", httpsRedirect=True)
-
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_lb_redirect_HTTP_with_SSL_term(self):
- ssl_termination_template = {
- 'privatekey': private_key,
- 'intermediateCertificate': 'fwaefawe',
- 'secureTrafficOnly': True,
- 'securePort': 443,
- 'certificate': cert
- }
- ssl_termination_api = copy.deepcopy(ssl_termination_template)
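-        # What get_ssl_termination() reports back: an 'enabled' flag is
-        # added and the private key is never echoed.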
- ssl_termination_api['enabled'] = True
- del ssl_termination_api['privatekey']
- template = self._set_template(
- self.lb_template, sslTermination=ssl_termination_template,
- protocol="HTTP", httpsRedirect=True)
-
- expected = self._set_expected(
- self.expected_body, protocol="HTTP", httpsRedirect=False)
-
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'create')
- rsrc.clb.create(self.lb_name, **expected).AndReturn(fake_lb)
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
-
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.httpsRedirect = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
- fake_lb.get_ssl_termination().AndReturn(ssl_termination_api)
- self.m.StubOutWithMock(fake_lb1, 'get_ssl_termination')
- fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
- fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
- fake_lb1.get_ssl_termination().AndReturn(ssl_termination_api)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
-
- def test_update_lb_half_closed(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['halfClosed'] = True
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.halfClosed = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(halfClosed=True)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_port(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['port'] = 1234
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.port = 1234
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(port=1234)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_lb_timeout(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['timeout'] = 120
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.timeout = 120
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb, 'update')
- fake_lb.update(timeout=120)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_health_monitor_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['healthMonitor'] = {
- 'type': "HTTP", 'delay': 10, 'timeout': 10,
- 'attemptsBeforeDeactivation': 4, 'path': "/",
- 'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
- 'hostHeader': "example.com"}
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
- fake_lb.get_health_monitor().AndReturn({})
- fake_lb.get_health_monitor().AndReturn(
- {'type': "HTTP", 'delay': 10, 'timeout': 10,
- 'attemptsBeforeDeactivation': 4, 'path': "/",
- 'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
- 'hostHeader': "example.com"})
-
- self.m.StubOutWithMock(fake_lb, 'add_health_monitor')
- fake_lb.add_health_monitor(
- attemptsBeforeDeactivation=4, bodyRegex='.* testing .*', delay=10,
- hostHeader='example.com', path='/',
- statusRegex='^[234][0-9][0-9]$', timeout=10, type='HTTP')
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_health_monitor_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- hm = {'type': "HTTP", 'delay': 10, 'timeout': 10,
- 'attemptsBeforeDeactivation': 4, 'path': "/",
- 'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
- 'hostHeader': "example.com"}
- template['Resources'][lb_name]['Properties']['healthMonitor'] = hm
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['healthMonitor'] = hm
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.StubOutWithMock(fake_lb, 'get_health_monitor')
- fake_lb.get_health_monitor().AndReturn(
- {'type': "HTTP", 'delay': 10, 'timeout': 10,
- 'attemptsBeforeDeactivation': 4, 'path': "/",
- 'statusRegex': "^[234][0-9][0-9]$", 'bodyRegex': ".* testing .*",
- 'hostHeader': "example.com"})
- fake_lb.get_health_monitor().AndReturn({})
-
- self.m.StubOutWithMock(fake_lb, 'delete_health_monitor')
- fake_lb.delete_health_monitor()
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_session_persistence_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['sessionPersistence'] = 'SOURCE_IP'
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.assertEqual('SOURCE_IP', fake_lb.session_persistence)
- self.m.VerifyAll()
-
- def test_update_session_persistence_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'sessionPersistence'] = "SOURCE_IP"
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['sessionPersistence'] = {'persistenceType': "SOURCE_IP"}
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.assertEqual('', fake_lb.session_persistence)
- self.m.VerifyAll()
-
- def test_update_ssl_termination_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['sslTermination'] = {
- 'securePort': 443,
- 'privatekey': private_key,
- 'certificate': cert,
- 'secureTrafficOnly': False,
- 'intermediateCertificate': ''
- }
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443, 'certificate': cert,
- 'secureTrafficOnly': False, 'enabled': True})
-
- self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
- fake_lb.add_ssl_termination(
- securePort=443, privatekey=private_key, certificate=cert,
- secureTrafficOnly=False, intermediateCertificate='')
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_ssl_termination_delete(self):
- template = copy.deepcopy(self.lb_template)
- ssl_termination_template = {
- 'securePort': 443, 'privatekey': private_key, 'certificate': cert,
- 'intermediateCertificate': '', 'secureTrafficOnly': False}
- ssl_termination_api = copy.deepcopy(ssl_termination_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties']['sslTermination'] = (
- ssl_termination_template)
- # The SSL termination config is done post-creation, so no need
- # to modify self.expected_body
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
-
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({})
-
- self.m.StubOutWithMock(fake_lb, 'add_ssl_termination')
- fake_lb.add_ssl_termination(**ssl_termination_api)
-
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443, 'certificate': cert,
- 'secureTrafficOnly': False, 'enabled': True})
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(
- fake_lb)
-
- self.m.StubOutWithMock(fake_lb, 'get_ssl_termination')
- fake_lb.get_ssl_termination().AndReturn({
- 'securePort': 443, 'certificate': cert,
- 'secureTrafficOnly': False})
-
- self.m.StubOutWithMock(fake_lb, 'delete_ssl_termination')
- fake_lb.delete_ssl_termination()
-
- fake_lb.get_ssl_termination().AndReturn({})
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
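The update tests above all share one mox lifecycle: stub a method, record the expected call sequence, ReplayAll, exercise the resource, VerifyAll, and UnsetStubs when expectations must change mid-test. A minimal self-contained sketch of that cycle, using a hypothetical Client class in place of the fake load balancer:

    import mox

    class Client(object):
        def get_ssl_termination(self):
            raise RuntimeError('real API call')  # never runs once stubbed

    m = mox.Mox()
    client = Client()
    m.StubOutWithMock(client, 'get_ssl_termination')
    # Record mode: declare the exact calls and their canned returns.
    client.get_ssl_termination().AndReturn({})
    client.get_ssl_termination().AndReturn({'securePort': 443})
    m.ReplayAll()                  # switch from recording to replaying
    assert client.get_ssl_termination() == {}
    assert client.get_ssl_termination() == {'securePort': 443}
    m.VerifyAll()                  # fails if a recorded call never happened
    m.UnsetStubs()                 # restore the real method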
- def test_update_metadata_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['metadata'] = {'a': 1, 'b': 2}
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'get_metadata')
- fake_lb.get_metadata().AndReturn({})
- fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
-
- self.m.StubOutWithMock(fake_lb, 'set_metadata')
- fake_lb.set_metadata({'a': 1, 'b': 2})
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_metadata_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties']['metadata'] = {
- 'a': 1, 'b': 2}
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['metadata'] = mox.SameElementsAs(
- [{'key': 'a', 'value': 1},
- {'key': 'b', 'value': 2}])
- rsrc, fake_lb = self._mock_loadbalancer(
- template, self.lb_name, expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.StubOutWithMock(fake_lb, 'get_metadata')
- fake_lb.get_metadata().AndReturn({'a': 1, 'b': 2})
- fake_lb.get_metadata().AndReturn({})
-
- self.m.StubOutWithMock(fake_lb, 'delete_metadata')
- fake_lb.delete_metadata()
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_errorpage_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- error_page = (
- '<html><head><title>Service Unavailable</title></head><body><h2>'
- 'Service Unavailable</h2>The service is unavailable</body></html>')
-
- props = copy.deepcopy(self.lb_props)
- props['errorPage'] = error_page
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'get_error_page')
- fake_lb.get_error_page().AndReturn(
- {'errorpage': {'content': 'foo'}})
- fake_lb.get_error_page().AndReturn(
- {'errorpage': {'content': error_page}})
-
- self.m.StubOutWithMock(fake_lb, 'set_error_page')
- fake_lb.set_error_page(error_page)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_errorpage_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- error_page = (
- '<html><head><title>Service Unavailable</title></head><body><h2>'
- 'Service Unavailable</h2>The service is unavailable</body></html>')
- template['Resources'][lb_name]['Properties']['errorPage'] = error_page
- # The error page config is done post-creation, so no need to
- # modify self.expected_body
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
-
- self.m.StubOutWithMock(fake_lb, 'get_error_page')
- fake_lb.get_error_page().AndReturn({})
-
- self.m.StubOutWithMock(fake_lb, 'set_error_page')
- fake_lb.set_error_page(error_page)
-
- fake_lb.get_error_page().AndReturn({'errorpage':
- {'content': error_page}})
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(
- fake_lb)
-
- self.m.StubOutWithMock(fake_lb, 'clear_error_page')
- fake_lb.clear_error_page()
-
- self.m.StubOutWithMock(fake_lb, 'get_error_page')
- fake_lb.get_error_page().AndReturn(
- {'errorpage': {'content': error_page}})
- fake_lb.get_error_page().AndReturn({'errorpage': {'content': ""}})
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_connection_logging_enable(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['connectionLogging'] = True
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.assertTrue(fake_lb.connection_logging)
- self.m.VerifyAll()
-
- def test_update_connection_logging_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'connectionLogging'] = True
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['connectionLogging'] = {'enabled': True}
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.connection_logging = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.connection_logging = False
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- update_template = rsrc.t.freeze(properties=self.lb_props)
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.assertFalse(fake_lb.connection_logging)
- self.m.VerifyAll()
-
- def test_update_connection_logging_disable(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'connectionLogging'] = True
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['connectionLogging'] = {'enabled': True}
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['connectionLogging'] = False
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.assertFalse(fake_lb.connection_logging)
- self.m.VerifyAll()
-
- def test_update_connection_throttle_add(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['connectionThrottle'] = {'maxConnections': 1000}
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'add_connection_throttle')
- self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
- fake_lb.get_connection_throttle().AndReturn(
- {'maxConnectionRate': None, 'minConnections': None,
- 'rateInterval': None, 'maxConnections': 100})
-
- fake_lb.add_connection_throttle(
- maxConnections=1000, maxConnectionRate=None, minConnections=None,
- rateInterval=None)
-
- fake_lb.get_connection_throttle().AndReturn(
- {'maxConnectionRate': None, 'minConnections': None,
- 'rateInterval': None, 'maxConnections': 1000})
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_connection_throttle_delete(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'connectionThrottle'] = {'maxConnections': 1000}
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['connectionThrottle'] = {
- 'maxConnections': 1000, 'maxConnectionRate': None,
- 'rateInterval': None, 'minConnections': None}
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- del props['connectionThrottle']
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.StubOutWithMock(fake_lb, 'get_connection_throttle')
- fake_lb.get_connection_throttle().AndReturn({
- 'maxConnections': 1000, 'maxConnectionRate': None,
- 'rateInterval': None, 'minConnections': None})
-
- self.m.StubOutWithMock(fake_lb, 'delete_connection_throttle')
- fake_lb.delete_connection_throttle()
-
- fake_lb.get_connection_throttle().AndReturn({})
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_content_caching_enable(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['contentCaching'] = 'ENABLED'
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.content_caching = False
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.content_caching = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_content_caching_deleted(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'contentCaching'] = 'ENABLED'
- # Enabling the content cache is done post-creation, so no need
- # to modify self.expected_body
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- del props['contentCaching']
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.content_caching = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.content_caching = False
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_content_caching_disable(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- template['Resources'][lb_name]['Properties'][
- 'contentCaching'] = 'ENABLED'
- # Enabling the content cache is done post-creation, so no need
- # to modify self.expected_body
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- self.expected_body)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['contentCaching'] = 'DISABLED'
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb1.content_caching = True
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.content_caching = False
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_delete(self):
- template = self._set_template(self.lb_template,
- contentCaching='ENABLED')
- rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
- self.m.VerifyAll()
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.delete)()
- self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_delete_immutable(self):
- template = self._set_template(self.lb_template,
- contentCaching='ENABLED')
- rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).AndRaise(lb.NotFound('foo'))
-
- self.m.StubOutWithMock(fake_lb, 'delete')
- fake_lb.delete().AndRaise(Exception('immutable'))
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.delete)()
- self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_delete_non_immutable_exc(self):
- template = self._set_template(self.lb_template,
- contentCaching='ENABLED')
- rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
-
- self.m.StubOutWithMock(fake_lb, 'delete')
- fake_lb.delete().AndRaise(FakeException())
- self.m.ReplayAll()
-
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(rsrc.delete))
- self.assertIn('FakeException', six.text_type(exc))
- self.m.VerifyAll()
-
- def test_delete_states(self):
- template = self._set_template(self.lb_template,
- contentCaching='ENABLED')
- rsrc, fake_lb = self._mock_loadbalancer(template, self.lb_name,
- self.expected_body)
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.create)()
-
- self.m.UnsetStubs()
- fake_lb1 = copy.deepcopy(fake_lb)
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb3 = copy.deepcopy(fake_lb)
- self.m.StubOutWithMock(rsrc.clb, 'get')
-
- fake_lb1.status = 'ACTIVE'
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
- fake_lb2.status = 'PENDING_DELETE'
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
- fake_lb3.status = 'DELETED'
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb3)
-
- self.m.ReplayAll()
-
- scheduler.TaskRunner(rsrc.delete)()
- self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
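Between test_delete and test_delete_states, the deletion contract is: keep polling while the load balancer reports ACTIVE or PENDING_DELETE, and treat NotFound or a DELETED status as success. A hypothetical condensation of that check (not the resource's actual method; lb.NotFound is the exception stubbed in test_delete):

    def check_delete_complete(clb, lb_id):
        try:
            loadbalancer = clb.get(lb_id)
        except lb.NotFound:
            return True                       # already gone
        # ACTIVE and PENDING_DELETE both mean the delete is in flight.
        return loadbalancer.status == 'DELETED'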
- def test_redir(self):
- mock_stack = mock.Mock()
- mock_stack.db_resource_get.return_value = None
- mock_stack.has_cache_data.return_value = False
- props = {'httpsRedirect': True,
- 'protocol': 'HTTPS',
- 'port': 443,
- 'nodes': [],
- 'virtualIps': [{'id': '1234'}]}
- mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
- LoadBalancerWithFakeClient,
- properties=props)
- mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
- self.assertIsNone(mock_lb.validate())
- props['protocol'] = 'HTTP'
- props['sslTermination'] = {
- 'secureTrafficOnly': True,
- 'securePort': 443,
- 'privatekey': "bobloblaw",
- 'certificate': 'mycert'
- }
- mock_resdef = rsrc_defn.ResourceDefinition("test_lb_2",
- LoadBalancerWithFakeClient,
- properties=props)
- mock_lb = lb.CloudLoadBalancer("test_2", mock_resdef, mock_stack)
- self.assertIsNone(mock_lb.validate())
-
- def test_invalid_redir_proto(self):
- mock_stack = mock.Mock()
- mock_stack.db_resource_get.return_value = None
- mock_stack.has_cache_data.return_value = False
- props = {'httpsRedirect': True,
- 'protocol': 'TCP',
- 'port': 1234,
- 'nodes': [],
- 'virtualIps': [{'id': '1234'}]}
- mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
- LoadBalancerWithFakeClient,
- properties=props)
- mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
- ex = self.assertRaises(exception.StackValidationFailed,
- mock_lb.validate)
- self.assertIn("HTTPS redirect is only available", six.text_type(ex))
-
- def test_invalid_redir_ssl(self):
- mock_stack = mock.Mock()
- mock_stack.db_resource_get.return_value = None
- mock_stack.has_cache_data.return_value = False
- props = {'httpsRedirect': True,
- 'protocol': 'HTTP',
- 'port': 1234,
- 'nodes': [],
- 'virtualIps': [{'id': '1234'}]}
- mock_resdef = rsrc_defn.ResourceDefinition("test_lb",
- LoadBalancerWithFakeClient,
- properties=props)
- mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
- ex = self.assertRaises(exception.StackValidationFailed,
- mock_lb.validate)
- self.assertIn("HTTPS redirect is only available", six.text_type(ex))
- props['sslTermination'] = {
- 'secureTrafficOnly': False,
- 'securePort': 443,
- 'privatekey': "bobloblaw",
- 'certificate': 'mycert'
- }
- mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
- ex = self.assertRaises(exception.StackValidationFailed,
- mock_lb.validate)
- self.assertIn("HTTPS redirect is only available", six.text_type(ex))
- props['sslTermination'] = {
- 'secureTrafficOnly': True,
- 'securePort': 1234,
- 'privatekey': "bobloblaw",
- 'certificate': 'mycert'
- }
- mock_lb = lb.CloudLoadBalancer("test", mock_resdef, mock_stack)
- ex = self.assertRaises(exception.StackValidationFailed,
- mock_lb.validate)
- self.assertIn("HTTPS redirect is only available", six.text_type(ex))
-
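The three redirect tests pin down a single validation rule: httpsRedirect is only valid on an HTTPS listener, or on an HTTP listener whose sslTermination forces secure-only traffic on port 443. A hypothetical predicate covering exactly the cases exercised above:

    def https_redirect_allowed(props):
        if not props.get('httpsRedirect'):
            return True                       # nothing to validate
        if props.get('protocol') == 'HTTPS':
            return True                       # test_redir, first case
        ssl = props.get('sslTermination') or {}
        # test_redir, second case; each test_invalid_redir_* case
        # violates one of the three conditions below.
        return (props.get('protocol') == 'HTTP' and
                ssl.get('secureTrafficOnly') is True and
                ssl.get('securePort') == 443)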
- def test_update_nodes_condition_draining(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- fake_lb.nodes = self.expected_body['nodes']
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- expected_ip = '172.168.1.4'
- props['nodes'] = [
- {"addresses": ["166.78.103.141"],
- "port": 80,
- "condition": "DRAINING",
- "type": "PRIMARY",
- "weight": 1},
- {"addresses": [expected_ip],
- "port": 80,
- "condition": "DRAINING",
- "type": "PRIMARY",
- "weight": 1}]
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb1, 'add_nodes')
- fake_lb1.add_nodes([
- fake_lb1.Node(address=expected_ip,
- port=80,
- condition='DRAINING',
- type="PRIMARY", weight=1)])
-
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.nodes = [
- FakeNode(address=u"166.78.103.141", port=80,
- condition=u"DRAINING", type="PRIMARY", weight=1),
- FakeNode(address=u"172.168.1.4", port=80,
- condition=u"DRAINING", type="PRIMARY", weight=1),
- ]
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_nodes_add_same_address_different_port(self):
- rsrc, fake_lb = self._mock_loadbalancer(self.lb_template,
- self.lb_name,
- self.expected_body)
- fake_lb.nodes = self.expected_body['nodes']
- fake_lb.tracker = "fake_lb"
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['nodes'] = [
- {"addresses": ["166.78.103.141"],
- "port": 80,
- "condition": "ENABLED",
- "type": "PRIMARY",
- "weight": 1},
- {"addresses": ["166.78.103.141"],
- "port": 81,
- "condition": "ENABLED",
- "type": "PRIMARY",
- "weight": 1}]
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb1, 'add_nodes')
- fake_lb1.add_nodes([
- fake_lb1.Node(address="166.78.103.141",
- port=81,
- condition='ENABLED',
- type="PRIMARY", weight=1)])
- fake_lb1.tracker = "fake_lb1"
-
- fake_lb2 = copy.deepcopy(fake_lb)
- fake_lb2.nodes = [
- FakeNode(address=u"166.78.103.141", port=80,
- condition=u"ENABLED", type="PRIMARY", weight=1),
- FakeNode(address=u"166.78.103.141", port=81,
- condition=u"ENABLED", type="PRIMARY", weight=1),
- ]
- fake_lb2.tracker = "fake_lb2"
- rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
-
- def test_update_nodes_defaults(self):
- template = copy.deepcopy(self.lb_template)
- lb_name = next(iter(template['Resources']))
- tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]
- tmpl_node['type'] = "PRIMARY"
- tmpl_node['condition'] = "ENABLED"
- tmpl_node['weight'] = 1
- expected_body = copy.deepcopy(self.expected_body)
- expected_body['nodes'] = [FakeNode(address=u"166.78.103.141", port=80,
- condition=u"ENABLED",
- type="PRIMARY", weight=1)]
-
- rsrc, fake_lb = self._mock_loadbalancer(template,
- self.lb_name,
- expected_body)
- fake_lb.nodes = self.expected_body['nodes']
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.create)()
-
- props = copy.deepcopy(self.lb_props)
- props['nodes'] = [{"addresses": ["166.78.103.141"], "port": 80}]
- update_template = rsrc.t.freeze(properties=props)
-
- self.m.UnsetStubs()
- self.m.StubOutWithMock(rsrc.clb, 'get')
- fake_lb1 = copy.deepcopy(fake_lb)
- rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)
-
- self.m.StubOutWithMock(fake_lb1, 'add_nodes')
-
- self.m.ReplayAll()
- scheduler.TaskRunner(rsrc.update, update_template)()
- self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
- self.m.VerifyAll()
diff --git a/contrib/rackspace/rackspace/tests/test_cloudnetworks.py b/contrib/rackspace/rackspace/tests/test_cloudnetworks.py
deleted file mode 100644
index 29c36e238..000000000
--- a/contrib/rackspace/rackspace/tests/test_cloudnetworks.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-from oslo_utils import reflection
-import six
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine import resource
-from heat.engine import scheduler
-from heat.tests import common
-from heat.tests import utils
-
-from ..resources import cloudnetworks # noqa
-
-try:
- from pyrax.exceptions import NotFound # noqa
-except ImportError:
- from ..resources.cloudnetworks import NotFound # noqa
-
-
-class FakeNetwork(object):
-
- def __init__(self, client, label="test_network", cidr="172.16.0.0/24"):
- self.client = client
- self.label = label
- self.cidr = cidr
- self.id = str(uuid.uuid4())
-
- def _is_deleted(self):
- return (self.client and
- self.id not in [nw.id for nw in self.client.networks])
-
- def get(self):
- if self._is_deleted():
- raise NotFound("I am deleted")
-
- def delete(self):
- self.client._delete(self)
-
-
-class FakeClient(object):
-
- def __init__(self):
- self.networks = []
-
- def create(self, label=None, cidr=None):
- nw = FakeNetwork(self, label=label, cidr=cidr)
- self.networks.append(nw)
- return nw
-
- def get(self, nwid):
- for nw in self.networks:
- if nw.id == nwid:
- return nw
- raise NotFound("No network %s" % nwid)
-
- def _delete(self, nw):
- try:
- self.networks.remove(nw)
- except ValueError:
- pass
-
-
-class FakeClientRaiseException(FakeClient):
-
- def create(self, label=None, cidr=None):
- raise Exception
-
- def get(self, nwid):
- raise Exception
-
-
-@mock.patch.object(cloudnetworks.CloudNetwork, "cloud_networks")
-class CloudNetworkTest(common.HeatTestCase):
-
- _template = template_format.parse("""
- heat_template_version: 2013-05-23
- description: Test stack for Rackspace Cloud Networks
- resources:
- cnw:
- type: Rackspace::Cloud::Network
- properties:
- label: test_network
- cidr: 172.16.0.0/24
- """)
-
- def setUp(self):
- super(CloudNetworkTest, self).setUp()
- resource._register_class("Rackspace::Cloud::Network",
- cloudnetworks.CloudNetwork)
-
- def _parse_stack(self):
- class_name = reflection.get_class_name(self, fully_qualified=False)
- self.stack = utils.parse_stack(self._template,
- stack_name=class_name)
-
- def _setup_stack(self, mock_client, *args):
- self.fake_cnw = FakeClient(*args)
- mock_client.return_value = self.fake_cnw
- self._parse_stack()
- self.stack.create()
- self.assertEqual((self.stack.CREATE, self.stack.COMPLETE),
- self.stack.state)
- res = self.stack['cnw']
- self.assertEqual((res.CREATE, res.COMPLETE), res.state)
-
- def test_attributes(self, mock_client):
- self._setup_stack(mock_client)
- res = self.stack['cnw']
- template_resource = self._template['resources']['cnw']
- expect_label = template_resource['properties']['label']
- expect_cidr = template_resource['properties']['cidr']
- self.assertEqual(expect_label, res.FnGetAtt('label'))
- self.assertEqual(expect_cidr, res.FnGetAtt('cidr'))
-
- def test_create_bad_cidr(self, mock_client):
- prop = self._template['resources']['cnw']['properties']
- prop['cidr'] = "bad cidr"
- self._parse_stack()
- exc = self.assertRaises(exception.StackValidationFailed,
- self.stack.validate)
- self.assertIn("Invalid net cidr", six.text_type(exc))
- # reset property
- prop['cidr'] = "172.16.0.0/24"
-
- def test_check(self, mock_client):
- self._setup_stack(mock_client)
- res = self.stack['cnw']
- scheduler.TaskRunner(res.check)()
- self.assertEqual((res.CHECK, res.COMPLETE), res.state)
-
- self.fake_cnw.networks = []
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(res.check))
- self.assertEqual((res.CHECK, res.FAILED), res.state)
- self.assertIn('No network', str(exc))
-
- def test_delete(self, mock_client):
- self._setup_stack(mock_client)
- res = self.stack['cnw']
- res_id = res.FnGetRefId()
- scheduler.TaskRunner(res.delete)()
- self.assertEqual((res.DELETE, res.COMPLETE), res.state)
- exc = self.assertRaises(NotFound, self.fake_cnw.get, res_id)
- self.assertIn(res_id, six.text_type(exc))
-
- def test_delete_no_network_created(self, mock_client):
- self.fake_cnw = FakeClientRaiseException()
- mock_client.return_value = self.fake_cnw
- self._parse_stack()
- self.stack.create()
- self.assertEqual((self.stack.CREATE, self.stack.FAILED),
- self.stack.state)
- res = self.stack['cnw']
- self.assertEqual((res.CREATE, res.FAILED), res.state)
- scheduler.TaskRunner(res.delete)()
- self.assertEqual((res.DELETE, res.COMPLETE), res.state)
-
- def test_delete_in_use(self, mock_client):
- self._setup_stack(mock_client)
- res = self.stack['cnw']
- fake_network = res.network()
- fake_network.delete = mock.Mock()
- fake_network.delete.side_effect = [cloudnetworks.NetworkInUse(), True]
- mock_client.return_value = fake_network
- fake_network.get = mock.Mock()
- fake_network.get.side_effect = [cloudnetworks.NotFound()]
-
- scheduler.TaskRunner(res.delete)()
- self.assertEqual((res.DELETE, res.COMPLETE), res.state)
-
- def test_delete_not_complete(self, mock_client):
- self._setup_stack(mock_client)
- res = self.stack['cnw']
- mock_client.get = mock.Mock()
-
- task = res.handle_delete()
- self.assertFalse(res.check_delete_complete(task))
-
- def test_delete_not_found(self, mock_client):
- self._setup_stack(mock_client)
- self.fake_cnw.networks = []
- res = self.stack['cnw']
- scheduler.TaskRunner(res.delete)()
- self.assertEqual((res.DELETE, res.COMPLETE), res.state)
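CloudNetworkTest above is patched at class level, so every test method receives the patched attribute as a trailing argument. A small standalone illustration of that mock behavior (Service and its fetch method are invented for the example):

    import unittest
    import mock

    class Service(object):
        def fetch(self):
            return 'real'

    @mock.patch.object(Service, 'fetch')
    class ServiceTest(unittest.TestCase):
        # The class decorator applies to every test_* method, which is
        # why each method takes the mock as an extra parameter, exactly
        # like mock_client in CloudNetworkTest.
        def test_fetch_is_patched(self, mock_fetch):
            mock_fetch.return_value = 'fake'
            self.assertEqual('fake', Service().fetch())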
diff --git a/contrib/rackspace/rackspace/tests/test_lb_node.py b/contrib/rackspace/rackspace/tests/test_lb_node.py
deleted file mode 100644
index b7f778a7f..000000000
--- a/contrib/rackspace/rackspace/tests/test_lb_node.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import mock
-
-from heat.engine import rsrc_defn
-from heat.tests import common
-
-from ..resources import lb_node # noqa
-from ..resources.lb_node import ( # noqa
- LoadbalancerDeleted,
- NotFound,
- NodeNotFound)
-
-from .test_cloud_loadbalancer import FakeNode # noqa
-
-
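The tests below assert node equality directly (assertEqual(fake_node, expected_node)), which only works because FakeNode defines value equality. A minimal sketch of such a comparable test double, assuming attribute-by-attribute comparison rather than FakeNode's actual definition:

    class ComparableNode(object):
        def __init__(self, **attrs):
            self.__dict__.update(attrs)

        def __eq__(self, other):
            return self.__dict__ == other.__dict__

        def __ne__(self, other):           # Python 2 needs the complement
            return not self.__eq__(other)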
-class LBNode(lb_node.LBNode):
- @classmethod
- def is_service_available(cls, context):
- return (True, None)
-
-
-class LBNodeTest(common.HeatTestCase):
- def setUp(self):
- super(LBNodeTest, self).setUp()
- self.mockstack = mock.Mock()
- self.mockstack.has_cache_data.return_value = False
- self.mockstack.db_resource_get.return_value = None
- self.mockclient = mock.Mock()
- self.mockstack.clients.client.return_value = self.mockclient
-
- self.def_props = {
- LBNode.LOAD_BALANCER: 'some_lb_id',
- LBNode.DRAINING_TIMEOUT: 60,
- LBNode.ADDRESS: 'some_ip',
- LBNode.PORT: 80,
- LBNode.CONDITION: 'ENABLED',
- LBNode.TYPE: 'PRIMARY',
- LBNode.WEIGHT: None,
- }
- self.resource_def = rsrc_defn.ResourceDefinition(
- "test", LBNode, properties=self.def_props)
-
- self.resource = LBNode("test", self.resource_def, self.mockstack)
- self.resource.resource_id = 12345
-
- def test_create(self):
- self.resource.resource_id = None
-
- fake_lb = mock.Mock()
- fake_lb.add_nodes.return_value = (None, {'nodes': [{'id': 12345}]})
- self.mockclient.get.return_value = fake_lb
-
- fake_node = mock.Mock()
- self.mockclient.Node.return_value = fake_node
-
- self.resource.check_create_complete()
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- self.mockclient.Node.assert_called_once_with(
- address='some_ip', port=80, condition='ENABLED',
- type='PRIMARY', weight=0)
- fake_lb.add_nodes.assert_called_once_with([fake_node])
- self.assertEqual(self.resource.resource_id, 12345)
-
- def test_create_lb_not_found(self):
- self.mockclient.get.side_effect = NotFound()
- self.assertRaises(NotFound, self.resource.check_create_complete)
-
- def test_create_lb_deleted(self):
- fake_lb = mock.Mock()
- fake_lb.id = 1111
- fake_lb.status = 'DELETED'
- self.mockclient.get.return_value = fake_lb
-
- exc = self.assertRaises(LoadbalancerDeleted,
- self.resource.check_create_complete)
- self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
- str(exc))
-
- def test_create_lb_pending_delete(self):
- fake_lb = mock.Mock()
- fake_lb.id = 1111
- fake_lb.status = 'PENDING_DELETE'
- self.mockclient.get.return_value = fake_lb
-
- exc = self.assertRaises(LoadbalancerDeleted,
- self.resource.check_create_complete)
- self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
- str(exc))
-
- def test_handle_update_method(self):
- self.assertEqual(self.resource.handle_update(None, None, 'foo'), 'foo')
-
- def _test_update(self, diff):
- fake_lb = mock.Mock()
- fake_node = FakeNode(id=12345, address='a', port='b')
- fake_node.update = mock.Mock()
- expected_node = FakeNode(id=12345, address='a', port='b', **diff)
- expected_node.update = fake_node.update
- fake_lb.nodes = [fake_node]
- self.mockclient.get.return_value = fake_lb
-
- self.assertFalse(self.resource.check_update_complete(prop_diff=diff))
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- fake_node.update.assert_called_once_with()
- self.assertEqual(fake_node, expected_node)
-
- def test_update_condition(self):
- self._test_update({'condition': 'DISABLED'})
-
- def test_update_weight(self):
- self._test_update({'weight': 100})
-
- def test_update_type(self):
- self._test_update({'type': 'SECONDARY'})
-
- def test_update_multiple(self):
- self._test_update({'condition': 'DISABLED',
- 'weight': 100,
- 'type': 'SECONDARY'})
-
- def test_update_finished(self):
- fake_lb = mock.Mock()
- fake_node = FakeNode(id=12345, address='a', port='b',
- condition='ENABLED')
- fake_node.update = mock.Mock()
- expected_node = FakeNode(id=12345, address='a', port='b',
- condition='ENABLED')
- expected_node.update = fake_node.update
- fake_lb.nodes = [fake_node]
- self.mockclient.get.return_value = fake_lb
-
- diff = {'condition': 'ENABLED'}
- self.assertTrue(self.resource.check_update_complete(prop_diff=diff))
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- self.assertFalse(fake_node.update.called)
- self.assertEqual(fake_node, expected_node)
-
- def test_update_lb_not_found(self):
- self.mockclient.get.side_effect = NotFound()
-
- diff = {'condition': 'ENABLED'}
- self.assertRaises(NotFound, self.resource.check_update_complete,
- prop_diff=diff)
-
- def test_update_lb_deleted(self):
- fake_lb = mock.Mock()
- fake_lb.id = 1111
- fake_lb.status = 'DELETED'
- self.mockclient.get.return_value = fake_lb
-
- diff = {'condition': 'ENABLED'}
- exc = self.assertRaises(LoadbalancerDeleted,
- self.resource.check_update_complete,
- prop_diff=diff)
- self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
- str(exc))
-
- def test_update_lb_pending_delete(self):
- fake_lb = mock.Mock()
- fake_lb.id = 1111
- fake_lb.status = 'PENDING_DELETE'
- self.mockclient.get.return_value = fake_lb
-
- diff = {'condition': 'ENABLED'}
- exc = self.assertRaises(LoadbalancerDeleted,
- self.resource.check_update_complete,
- prop_diff=diff)
- self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
- str(exc))
-
- def test_update_node_not_found(self):
- fake_lb = mock.Mock()
- fake_lb.id = 4444
- fake_lb.nodes = []
- self.mockclient.get.return_value = fake_lb
-
- diff = {'condition': 'ENABLED'}
- exc = self.assertRaises(NodeNotFound,
- self.resource.check_update_complete,
- prop_diff=diff)
- self.assertEqual(
- "Node (ID 12345) not found on Load Balancer (ID 4444).", str(exc))
-
- def test_delete_no_id(self):
- self.resource.resource_id = None
- self.assertTrue(self.resource.check_delete_complete(None))
-
- def test_delete_lb_already_deleted(self):
- self.mockclient.get.side_effect = NotFound()
- self.assertTrue(self.resource.check_delete_complete(None))
- self.mockclient.get.assert_called_once_with('some_lb_id')
-
- def test_delete_lb_deleted_status(self):
- fake_lb = mock.Mock()
- fake_lb.status = 'DELETED'
- self.mockclient.get.return_value = fake_lb
-
- self.assertTrue(self.resource.check_delete_complete(None))
- self.mockclient.get.assert_called_once_with('some_lb_id')
-
- def test_delete_lb_pending_delete_status(self):
- fake_lb = mock.Mock()
- fake_lb.status = 'PENDING_DELETE'
- self.mockclient.get.return_value = fake_lb
-
- self.assertTrue(self.resource.check_delete_complete(None))
- self.mockclient.get.assert_called_once_with('some_lb_id')
-
- def test_delete_node_already_deleted(self):
- fake_lb = mock.Mock()
- fake_lb.nodes = []
- self.mockclient.get.return_value = fake_lb
-
- self.assertTrue(self.resource.check_delete_complete(None))
- self.mockclient.get.assert_called_once_with('some_lb_id')
-
- @mock.patch.object(lb_node.timeutils, 'utcnow')
- def test_drain_before_delete(self, mock_utcnow):
- fake_lb = mock.Mock()
- fake_node = FakeNode(id=12345, address='a', port='b')
- expected_node = FakeNode(id=12345, address='a', port='b',
- condition='DRAINING')
- fake_node.update = mock.Mock()
- expected_node.update = fake_node.update
- fake_node.delete = mock.Mock()
- expected_node.delete = fake_node.delete
- fake_lb.nodes = [fake_node]
- self.mockclient.get.return_value = fake_lb
-
- now = datetime.datetime.utcnow()
- mock_utcnow.return_value = now
-
- self.assertFalse(self.resource.check_delete_complete(now))
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- fake_node.update.assert_called_once_with()
- self.assertFalse(fake_node.delete.called)
- self.assertEqual(fake_node, expected_node)
-
- @mock.patch.object(lb_node.timeutils, 'utcnow')
- def test_delete_waiting(self, mock_utcnow):
- fake_lb = mock.Mock()
- fake_node = FakeNode(id=12345, address='a', port='b',
- condition='DRAINING')
- expected_node = FakeNode(id=12345, address='a', port='b',
- condition='DRAINING')
- fake_node.update = mock.Mock()
- expected_node.update = fake_node.update
- fake_node.delete = mock.Mock()
- expected_node.delete = fake_node.delete
- fake_lb.nodes = [fake_node]
- self.mockclient.get.return_value = fake_lb
-
- now = datetime.datetime.utcnow()
- now_plus_30 = now + datetime.timedelta(seconds=30)
- mock_utcnow.return_value = now_plus_30
-
- self.assertFalse(self.resource.check_delete_complete(now))
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- self.assertFalse(fake_node.update.called)
- self.assertFalse(fake_node.delete.called)
- self.assertEqual(fake_node, expected_node)
-
- @mock.patch.object(lb_node.timeutils, 'utcnow')
- def test_delete_finishing(self, mock_utcnow):
- fake_lb = mock.Mock()
- fake_node = FakeNode(id=12345, address='a', port='b',
- condition='DRAINING')
- expected_node = FakeNode(id=12345, address='a', port='b',
- condition='DRAINING')
- fake_node.update = mock.Mock()
- expected_node.update = fake_node.update
- fake_node.delete = mock.Mock()
- expected_node.delete = fake_node.delete
- fake_lb.nodes = [fake_node]
- self.mockclient.get.return_value = fake_lb
-
- now = datetime.datetime.utcnow()
- now_plus_62 = now + datetime.timedelta(seconds=62)
- mock_utcnow.return_value = now_plus_62
-
- self.assertFalse(self.resource.check_delete_complete(now))
-
- self.mockclient.get.assert_called_once_with('some_lb_id')
- self.assertFalse(fake_node.update.called)
- self.assertTrue(fake_node.delete.called)
- self.assertEqual(fake_node, expected_node)
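The three timeutils tests above walk one timeline: at t=0 the node is flipped to DRAINING, at t=30 (inside the 60-second DRAINING_TIMEOUT) nothing happens, and at t=62 the node is deleted. A hypothetical sketch of that state machine, not the resource's actual implementation:

    import datetime

    def check_delete_complete(node, started_at, timeout=60):
        now = datetime.datetime.utcnow()
        if node.condition != 'DRAINING':
            node.condition = 'DRAINING'
            node.update()                 # t=0: start draining connections
            return False
        if now < started_at + datetime.timedelta(seconds=timeout):
            return False                  # t=30: still inside the window
        node.delete()                     # t=62: window elapsed, delete
        return False                      # poll until the LB forgets the node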
diff --git a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py b/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
deleted file mode 100644
index 96877f3d5..000000000
--- a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
+++ /dev/null
@@ -1,662 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_config import cfg
-from oslo_utils import uuidutils
-import six
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine.clients.os import glance
-from heat.engine.clients.os import neutron
-from heat.engine.clients.os import nova
-from heat.engine import environment
-from heat.engine import resource
-from heat.engine import rsrc_defn
-from heat.engine import scheduler
-from heat.engine import stack as parser
-from heat.engine import template
-from heat.tests import common
-from heat.tests.openstack.nova import fakes
-from heat.tests import utils
-
-from ..resources import cloud_server # noqa
-
-wp_template = '''
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "WordPress",
- "Parameters" : {
- "key_name" : {
- "Description" : "key_name",
- "Type" : "String",
- "Default" : "test"
- }
- },
- "Resources" : {
- "WebServer": {
- "Type": "OS::Nova::Server",
- "Properties": {
- "image" : "CentOS 5.2",
- "flavor" : "256 MB Server",
- "key_name" : "test",
- "user_data" : "wordpress"
- }
- }
- }
-}
-'''
-
-cfg.CONF.import_opt('region_name_for_services', 'heat.common.config')
-
-
-class CloudServersTest(common.HeatTestCase):
- def setUp(self):
- super(CloudServersTest, self).setUp()
- cfg.CONF.set_override('region_name_for_services', 'RegionOne')
- self.ctx = utils.dummy_context()
-
- self.fc = fakes.FakeClient()
- mock_nova_create = mock.Mock()
- self.ctx.clients.client_plugin(
- 'nova')._create = mock_nova_create
- mock_nova_create.return_value = self.fc
-
- # The test environment may not have the pyrax client library installed,
- # and without pyrax the resource class would not be registered, so
- # register the resource class explicitly for unit testing.
- resource._register_class("OS::Nova::Server",
- cloud_server.CloudServer)
-
- def _setup_test_stack(self, stack_name):
- t = template_format.parse(wp_template)
- templ = template.Template(
- t, env=environment.Environment({'key_name': 'test'}))
-
- self.stack = parser.Stack(self.ctx, stack_name, templ,
- stack_id=uuidutils.generate_uuid())
- return (templ, self.stack)
-
- def _setup_test_server(self, return_server, name, image_id=None,
- override_name=False, stub_create=True):
- stack_name = '%s_s' % name
- (tmpl, stack) = self._setup_test_stack(stack_name)
-
- tmpl.t['Resources']['WebServer']['Properties'][
- 'image'] = image_id or 'CentOS 5.2'
- tmpl.t['Resources']['WebServer']['Properties'][
- 'flavor'] = '256 MB Server'
- self.patchobject(neutron.NeutronClientPlugin,
- 'find_resourceid_by_name_or_id',
- return_value='aaaaaa')
- self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
- return_value=1)
- self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
- return_value=1)
- server_name = '%s' % name
- if override_name:
- tmpl.t['Resources']['WebServer']['Properties'][
- 'name'] = server_name
-
- resource_defns = tmpl.resource_definitions(stack)
- server = cloud_server.CloudServer(server_name,
- resource_defns['WebServer'],
- stack)
- self.patchobject(nova.NovaClientPlugin, '_create',
- return_value=self.fc)
-
- self.patchobject(server, 'store_external_ports')
-
- if stub_create:
- self.patchobject(self.fc.servers, 'create',
- return_value=return_server)
- # Mock the internals of check_create_complete
- self.patchobject(self.fc.servers, 'get',
- return_value=return_server)
- return server
-
- def _create_test_server(self, return_server, name, override_name=False,
- stub_create=True):
- server = self._setup_test_server(return_server, name,
- stub_create=stub_create)
- scheduler.TaskRunner(server.create)()
- return server
-
- def _mock_metadata_os_distro(self):
- image_data = mock.Mock(metadata={'os_distro': 'centos'})
- self.fc.images.get = mock.Mock(return_value=image_data)
-
- def test_rackconnect_deployed(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {
- 'rackconnect_automation_status': 'DEPLOYED',
- 'rax_service_level_automation': 'Complete',
- }
- server = self._setup_test_server(return_server,
- 'test_rackconnect_deployed')
- server.context.roles = ['rack_connect']
- scheduler.TaskRunner(server.create)()
- self.assertEqual('CREATE', server.action)
- self.assertEqual('COMPLETE', server.status)
-
- def test_rackconnect_failed(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {
- 'rackconnect_automation_status': 'FAILED',
- 'rax_service_level_automation': 'Complete',
- }
- server = self._setup_test_server(return_server,
- 'test_rackconnect_failed')
- server.context.roles = ['rack_connect']
- create = scheduler.TaskRunner(server.create)
- exc = self.assertRaises(exception.ResourceFailure, create)
- self.assertEqual('Error: resources.test_rackconnect_failed: '
- 'RackConnect automation FAILED',
- six.text_type(exc))
-
- def test_rackconnect_unprocessable(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {
- 'rackconnect_automation_status': 'UNPROCESSABLE',
- 'rackconnect_unprocessable_reason': 'Fake reason',
- 'rax_service_level_automation': 'Complete',
- }
- server = self._setup_test_server(return_server,
- 'test_rackconnect_unprocessable')
- server.context.roles = ['rack_connect']
- scheduler.TaskRunner(server.create)()
- self.assertEqual('CREATE', server.action)
- self.assertEqual('COMPLETE', server.status)
-
- def test_rackconnect_unknown(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {
- 'rackconnect_automation_status': 'FOO',
- 'rax_service_level_automation': 'Complete',
- }
- server = self._setup_test_server(return_server,
- 'test_rackconnect_unknown')
- server.context.roles = ['rack_connect']
- create = scheduler.TaskRunner(server.create)
- exc = self.assertRaises(exception.ResourceFailure, create)
- self.assertEqual('Error: resources.test_rackconnect_unknown: '
- 'Unknown RackConnect automation status: FOO',
- six.text_type(exc))
-
- def test_rackconnect_deploying(self):
- return_server = self.fc.servers.list()[0]
- server = self._setup_test_server(return_server,
- 'srv_sts_bld')
- server.resource_id = 1234
- server.context.roles = ['rack_connect']
- check_iterations = [0]
-
- # Bind a fake get() method for check_create_complete to call
- def activate_status(server):
- check_iterations[0] += 1
- if check_iterations[0] == 1:
- return_server.metadata.update({
- 'rackconnect_automation_status': 'DEPLOYING',
- 'rax_service_level_automation': 'Complete',
- })
- if check_iterations[0] == 2:
- return_server.status = 'ACTIVE'
- if check_iterations[0] > 3:
- return_server.metadata.update({
- 'rackconnect_automation_status': 'DEPLOYED',
- })
- return return_server
- self.patchobject(self.fc.servers, 'get',
- side_effect=activate_status)
-
- scheduler.TaskRunner(server.create)()
- self.assertEqual((server.CREATE, server.COMPLETE), server.state)
-
- def test_rackconnect_no_status(self):
- return_server = self.fc.servers.list()[0]
- server = self._setup_test_server(return_server,
- 'srv_sts_bld')
-
- server.resource_id = 1234
- server.context.roles = ['rack_connect']
-
- check_iterations = [0]
-
- # Bind a fake get() method for check_create_complete to call
- def activate_status(server):
- check_iterations[0] += 1
- if check_iterations[0] == 1:
- return_server.status = 'ACTIVE'
- if check_iterations[0] > 2:
- return_server.metadata.update({
- 'rackconnect_automation_status': 'DEPLOYED',
- 'rax_service_level_automation': 'Complete'})
-
- return return_server
- self.patchobject(self.fc.servers, 'get',
- side_effect=activate_status)
- scheduler.TaskRunner(server.create)()
- self.assertEqual((server.CREATE, server.COMPLETE), server.state)
-
- def test_rax_automation_lifecycle(self):
- return_server = self.fc.servers.list()[0]
- server = self._setup_test_server(return_server,
- 'srv_sts_bld')
- server.resource_id = 1234
- server.context.roles = ['rack_connect']
- server.metadata = {}
- check_iterations = [0]
-
- # Bind a fake get() method for check_create_complete to call
- def activate_status(server):
- check_iterations[0] += 1
- if check_iterations[0] == 1:
- return_server.status = 'ACTIVE'
- if check_iterations[0] == 2:
- return_server.metadata = {
- 'rackconnect_automation_status': 'DEPLOYED'}
- if check_iterations[0] == 3:
- return_server.metadata = {
- 'rackconnect_automation_status': 'DEPLOYED',
- 'rax_service_level_automation': 'In Progress'}
- if check_iterations[0] > 3:
- return_server.metadata = {
- 'rackconnect_automation_status': 'DEPLOYED',
- 'rax_service_level_automation': 'Complete'}
- return return_server
- self.patchobject(self.fc.servers, 'get',
- side_effect=activate_status)
- scheduler.TaskRunner(server.create)()
- self.assertEqual((server.CREATE, server.COMPLETE), server.state)
-
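The activate_status closures above simulate a server that advances state on each poll; mock's side_effect expresses the same idea compactly. A tiny standalone sketch:

    import mock

    statuses = iter(['BUILD', 'BUILD', 'ACTIVE'])
    fake_get = mock.Mock(side_effect=lambda server_id: next(statuses))
    assert fake_get(1234) == 'BUILD'      # first poll
    assert fake_get(1234) == 'BUILD'      # still building
    assert fake_get(1234) == 'ACTIVE'     # create completes on this poll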
- def test_add_port_for_addresses(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rax_service_level_automation': 'Complete'}
- stack_name = 'test_stack'
- (tmpl, stack) = self._setup_test_stack(stack_name)
- resource_defns = tmpl.resource_definitions(stack)
- self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
- return_value=1)
- self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
- return_value=1)
- server = cloud_server.CloudServer('WebServer',
- resource_defns['WebServer'], stack)
- self.patchobject(server, 'store_external_ports')
-
- class Interface(object):
- def __init__(self, id, addresses):
- self.identifier = id
- self.addresses = addresses
-
- @property
- def id(self):
- return self.identifier
-
- @property
- def ip_addresses(self):
- return self.addresses
-
- interfaces = [
- {
- "id": "port-uuid-1",
- "ip_addresses": [
- {
- "address": "4.5.6.7",
- "network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
- "network_label": "public"
- },
- {
- "address": "2001:4802:7805:104:be76:4eff:fe20:2063",
- "network_id": "00xx000-0xx0-0xx0-0xx0-00xxx000",
- "network_label": "public"
- }
- ],
- "mac_address": "fa:16:3e:8c:22:aa"
- },
- {
- "id": "port-uuid-2",
- "ip_addresses": [
- {
- "address": "5.6.9.8",
- "network_id": "11xx1-1xx1-xx11-1xx1-11xxxx11",
- "network_label": "public"
- }
- ],
- "mac_address": "fa:16:3e:8c:44:cc"
- },
- {
- "id": "port-uuid-3",
- "ip_addresses": [
- {
- "address": "10.13.12.13",
- "network_id": "1xx1-1xx1-xx11-1xx1-11xxxx11",
- "network_label": "private"
- }
- ],
- "mac_address": "fa:16:3e:8c:44:dd"
- }
- ]
-
- ifaces = [Interface(i['id'], i['ip_addresses']) for i in interfaces]
- expected = {
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa':
- [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
- 'addr': '4.5.6.7',
- 'port': 'port-uuid-1',
- 'version': 4},
- {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
- 'addr': '5.6.9.8',
- 'port': 'port-uuid-2',
- 'version': 4}],
-
- 'private': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:44:cc',
- 'addr': '10.13.12.13',
- 'port': 'port-uuid-3',
- 'version': 4}],
- 'public': [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:22:aa',
- 'addr': '4.5.6.7',
- 'port': 'port-uuid-1',
- 'version': 4},
- {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:8c:33:bb',
- 'addr': '5.6.9.8',
- 'port': 'port-uuid-2',
- 'version': 4}]}
-
- server.client = mock.Mock()
- mock_client = mock.Mock()
- server.client.return_value = mock_client
- mock_ext = mock_client.os_virtual_interfacesv2_python_novaclient_ext
- mock_ext.list.return_value = ifaces
- resp = server._add_port_for_address(return_server)
- self.assertEqual(expected, resp)
-
- def test_rax_automation_build_error(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rax_service_level_automation':
- 'Build Error'}
- server = self._setup_test_server(return_server,
- 'test_managed_cloud_build_error')
- create = scheduler.TaskRunner(server.create)
- exc = self.assertRaises(exception.ResourceFailure, create)
- self.assertEqual('Error: resources.test_managed_cloud_build_error: '
- 'Rackspace Cloud automation failed',
- six.text_type(exc))
-
- def test_rax_automation_unknown(self):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rax_service_level_automation': 'FOO'}
- server = self._setup_test_server(return_server,
- 'test_managed_cloud_unknown')
- create = scheduler.TaskRunner(server.create)
- exc = self.assertRaises(exception.ResourceFailure, create)
- self.assertEqual('Error: resources.test_managed_cloud_unknown: '
- 'Unknown Rackspace Cloud automation status: FOO',
- six.text_type(exc))
-
- def _test_server_config_drive(self, user_data, config_drive, result,
- ud_format='RAW'):
- return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rax_service_level_automation': 'Complete'}
- stack_name = 'no_user_data'
- self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
- return_value=1)
- self.patchobject(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
- return_value=1)
- (tmpl, stack) = self._setup_test_stack(stack_name)
- properties = tmpl.t['Resources']['WebServer']['Properties']
- properties['user_data'] = user_data
- properties['config_drive'] = config_drive
- properties['user_data_format'] = ud_format
- properties['software_config_transport'] = "POLL_TEMP_URL"
- resource_defns = tmpl.resource_definitions(stack)
- server = cloud_server.CloudServer('WebServer',
- resource_defns['WebServer'], stack)
- server.metadata = {'rax_service_level_automation': 'Complete'}
- self.patchobject(server, 'store_external_ports')
- self.patchobject(server, "_populate_deployments_metadata")
- mock_servers_create = mock.Mock(return_value=return_server)
- self.fc.servers.create = mock_servers_create
- self.patchobject(self.fc.servers, 'get',
- return_value=return_server)
- scheduler.TaskRunner(server.create)()
- mock_servers_create.assert_called_with(
- image=mock.ANY,
- flavor=mock.ANY,
- key_name=mock.ANY,
- name=mock.ANY,
- security_groups=mock.ANY,
- userdata=mock.ANY,
- scheduler_hints=mock.ANY,
- meta=mock.ANY,
- nics=mock.ANY,
- availability_zone=mock.ANY,
- block_device_mapping=mock.ANY,
- block_device_mapping_v2=mock.ANY,
- config_drive=result,
- disk_config=mock.ANY,
- reservation_id=mock.ANY,
- files=mock.ANY,
- admin_pass=mock.ANY)
-
- def test_server_user_data_no_config_drive(self):
- self._test_server_config_drive("my script", False, True)
-
- def test_server_user_data_config_drive(self):
- self._test_server_config_drive("my script", True, True)
-
- def test_server_no_user_data_config_drive(self):
- self._test_server_config_drive(None, True, True)
-
- def test_server_no_user_data_no_config_drive(self):
- self._test_server_config_drive(None, False, False)
-
- def test_server_no_user_data_software_config(self):
- self._test_server_config_drive(None, False, True,
- ud_format="SOFTWARE_CONFIG")
-
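Taken together, the five config-drive cases above fix the rule for when the fake create call must see config_drive=True: whenever user data is present, the drive is requested explicitly, or the SOFTWARE_CONFIG transport needs one. A hypothetical condensation:

    def wants_config_drive(user_data, config_drive, ud_format='RAW'):
        return (bool(user_data) or bool(config_drive)
                or ud_format == 'SOFTWARE_CONFIG')

    assert wants_config_drive('my script', False) is True
    assert wants_config_drive('my script', True) is True
    assert wants_config_drive(None, True) is True
    assert wants_config_drive(None, False) is False
    assert wants_config_drive(None, False, 'SOFTWARE_CONFIG') is True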
-
-@mock.patch.object(resource.Resource, "client_plugin")
-@mock.patch.object(resource.Resource, "client")
-class CloudServersValidationTests(common.HeatTestCase):
- def setUp(self):
- super(CloudServersValidationTests, self).setUp()
- resource._register_class("OS::Nova::Server", cloud_server.CloudServer)
- properties_server = {
- "image": "CentOS 5.2",
- "flavor": "256 MB Server",
- "key_name": "test",
- "user_data": "wordpress",
- }
- self.mockstack = mock.Mock()
- self.mockstack.has_cache_data.return_value = False
- self.mockstack.db_resource_get.return_value = None
- self.rsrcdef = rsrc_defn.ResourceDefinition(
- "test", cloud_server.CloudServer, properties=properties_server)
-
- def test_validate_no_image(self, mock_client, mock_plugin):
- properties_server = {
- "flavor": "256 MB Server",
- "key_name": "test",
- "user_data": "wordpress",
- }
-
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", cloud_server.CloudServer, properties=properties_server)
- mock_plugin().find_flavor_by_name_or_id.return_value = 1
- server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
- mock_boot_vol = self.patchobject(
- server, '_validate_block_device_mapping')
- mock_boot_vol.return_value = True
- self.assertIsNone(server.validate())
-
- def test_validate_no_image_bfv(self, mock_client, mock_plugin):
- properties_server = {
- "flavor": "256 MB Server",
- "key_name": "test",
- "user_data": "wordpress",
- }
- rsrcdef = rsrc_defn.ResourceDefinition(
- "test", cloud_server.CloudServer, properties=properties_server)
-
- mock_plugin().find_flavor_by_name_or_id.return_value = 1
- server = cloud_server.CloudServer("test", rsrcdef, self.mockstack)
-
- mock_boot_vol = self.patchobject(
- server, '_validate_block_device_mapping')
- mock_boot_vol.return_value = True
-
- mock_flavor = mock.Mock(ram=4)
- mock_flavor.to_dict.return_value = {
- 'OS-FLV-WITH-EXT-SPECS:extra_specs': {
- 'class': 'standard1',
- },
- }
-
- mock_plugin().get_flavor.return_value = mock_flavor
- error = self.assertRaises(
- exception.StackValidationFailed, server.validate)
- self.assertEqual(
- 'Flavor 256 MB Server cannot be booted from volume.',
- six.text_type(error))
-
- def test_validate_bfv_volume_only(self, mock_client, mock_plugin):
- mock_plugin().find_flavor_by_name_or_id.return_value = 1
- mock_plugin().find_image_by_name_or_id.return_value = 1
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {
- 'OS-FLV-WITH-EXT-SPECS:extra_specs': {
- 'class': 'memory1',
- },
- }
-
- mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
- mock_image.get.return_value = "memory1"
- mock_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.return_value = mock_image
-
- error = self.assertRaises(
- exception.StackValidationFailed, server.validate)
- self.assertEqual(
- 'Flavor 256 MB Server must be booted from volume, '
- 'but image CentOS 5.2 was also specified.',
- six.text_type(error))
-
- def test_validate_image_flavor_excluded_class(self, mock_client,
- mock_plugin):
- mock_plugin().find_flavor_by_name_or_id.return_value = 1
- mock_plugin().find_image_by_name_or_id.return_value = 1
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_image = mock.Mock(status='ACTIVE', min_ram=2, min_disk=1)
- mock_image.get.return_value = "!standard1, *"
- mock_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {
- 'OS-FLV-WITH-EXT-SPECS:extra_specs': {
- 'class': 'standard1',
- },
- }
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.return_value = mock_image
-
- error = self.assertRaises(
- exception.StackValidationFailed, server.validate)
- self.assertEqual(
- 'Flavor 256 MB Server cannot be used with image CentOS 5.2.',
- six.text_type(error))
-
- def test_validate_image_flavor_ok(self, mock_client, mock_plugin):
- mock_plugin().find_flavor_by_name_or_id.return_value = 1
- mock_plugin().find_image_by_name_or_id.return_value = 1
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
- mock_image.get.return_value = "standard1"
- mock_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {
- 'OS-FLV-WITH-EXT-SPECS:extra_specs': {
- 'class': 'standard1',
- 'disk_io_index': 1,
- },
- }
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.return_value = mock_image
-
- self.assertIsNone(server.validate())
-
- def test_validate_image_flavor_empty_metadata(self, mock_client,
- mock_plugin):
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
- mock_image.get.return_value = ""
- mock_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {
- 'OS-FLV-WITH-EXT-SPECS:extra_specs': {
- 'flavor_classes': '',
- },
- }
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.return_value = mock_image
-
- self.assertIsNone(server.validate())
-
- def test_validate_image_flavor_no_metadata(self, mock_client, mock_plugin):
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
- mock_image.get.return_value = None
- mock_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {}
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.return_value = mock_image
-
- self.assertIsNone(server.validate())
-
- def test_validate_image_flavor_not_base(self, mock_client, mock_plugin):
- server = cloud_server.CloudServer("test", self.rsrcdef, self.mockstack)
-
- mock_image = mock.Mock(size=1, status='ACTIVE', min_ram=2, min_disk=2)
- mock_image.get.return_value = None
- mock_image.__iter__ = mock.Mock(return_value=iter(
- ['base_image_ref']))
- mock_image.__getitem__ = mock.Mock(return_value='1234')
-
- mock_base_image = mock.Mock(size=1, status='ACTIVE', min_ram=2,
- min_disk=2)
- mock_base_image.get.return_value = None
- mock_base_image.__iter__ = mock.Mock(return_value=iter([]))
-
- mock_flavor = mock.Mock(ram=4, disk=4)
- mock_flavor.to_dict.return_value = {}
-
- mock_plugin().get_flavor.return_value = mock_flavor
- mock_plugin().get_image.side_effect = [mock_image, mock_base_image]
-
- self.assertIsNone(server.validate())
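A note on the class-level decorators used by the validation tests above: mock.patch.object applied to a class wraps every test method, and decorators apply bottom-up, so the innermost patch ("client") arrives as the first extra argument (mock_client), followed by "client_plugin" (mock_plugin). A self-contained sketch of the pattern, with a stand-in Resource class:

import unittest

import mock  # unittest.mock on Python 3


class Resource(object):  # stand-in for heat.engine.resource.Resource
    def client(self):
        raise NotImplementedError

    def client_plugin(self):
        raise NotImplementedError


@mock.patch.object(Resource, 'client_plugin')
@mock.patch.object(Resource, 'client')
class PatchOrderTests(unittest.TestCase):
    def test_argument_order(self, mock_client, mock_plugin):
        # mock_plugin() and Resource().client_plugin() both resolve to the
        # same return_value mock, which is why the tests above configure
        # mock_plugin() directly.
        mock_plugin().find_flavor_by_name_or_id.return_value = 1
        plugin = Resource().client_plugin()
        self.assertEqual(1, plugin.find_flavor_by_name_or_id('m1.small'))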
diff --git a/contrib/rackspace/rackspace/tests/test_rackspace_dns.py b/contrib/rackspace/rackspace/tests/test_rackspace_dns.py
deleted file mode 100644
index 77feca7ef..000000000
--- a/contrib/rackspace/rackspace/tests/test_rackspace_dns.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine import environment
-from heat.engine import resource
-from heat.engine import rsrc_defn
-from heat.engine import scheduler
-from heat.engine import stack as parser
-from heat.engine import template
-from heat.tests import common
-from heat.tests import utils
-
-from ..resources import cloud_dns # noqa
-
-domain_only_template = '''
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "Dns instance running on Rackspace cloud",
- "Parameters" : {
- "UnittestDomain" : {
- "Description" : "Domain for unit tests",
- "Type" : "String",
- "Default" : 'dnsheatunittest.com'
- },
- "dnsttl" : {
- "Description" : "TTL for the domain",
- "Type" : "Number",
- "MinValue" : '301',
- "Default" : '301'
- },
- "name": {
- "Description" : "The cloud dns instance name",
- "Type": "String",
- "Default": "CloudDNS"
- }
- },
- "Resources" : {
- "domain" : {
- "Type": "Rackspace::Cloud::DNS",
- "Properties" : {
- "name" : "dnsheatunittest.com",
- "emailAddress" : "admin@dnsheatunittest.com",
- "ttl" : 3600,
- "comment" : "Testing Cloud DNS integration with Heat"
- }
- }
- }
-}
-'''
-
-
-class FakeDnsInstance(object):
- def __init__(self):
- self.id = 4
- self.resource_id = 4
-
- def get(self):
- pass
-
- def delete(self):
- pass
-
-
-class RackspaceDnsTest(common.HeatTestCase):
-
- def setUp(self):
- super(RackspaceDnsTest, self).setUp()
- # Test environment may not have pyrax client library installed and if
- # pyrax is not installed resource class would not be registered.
- # So register resource provider class explicitly for unit testing.
- resource._register_class("Rackspace::Cloud::DNS", cloud_dns.CloudDns)
- self.create_domain_only_args = {
- "name": 'dnsheatunittest.com',
- "emailAddress": 'admin@dnsheatunittest.com',
- "ttl": 3600,
- "comment": 'Testing Cloud DNS integration with Heat',
- "records": None
- }
- self.update_domain_only_args = {
- "emailAddress": 'updatedEmail@example.com',
- "ttl": 5555,
- "comment": 'updated comment'
- }
-
- def _setup_test_cloud_dns_instance(self, name, parsed_t):
- stack_name = '%s_stack' % name
- t = parsed_t
- templ = template.Template(
- t, env=environment.Environment({'name': 'test'}))
- self.stack = parser.Stack(utils.dummy_context(),
- stack_name,
- templ,
- stack_id=str(uuid.uuid4()))
-
- instance = cloud_dns.CloudDns(
- '%s_name' % name,
- templ.resource_definitions(self.stack)['domain'],
- self.stack)
- return instance
-
- def _stubout_create(self, instance, fake_dnsinstance, **create_args):
- mock_client = self.m.CreateMockAnything()
- self.m.StubOutWithMock(instance, 'cloud_dns')
- instance.cloud_dns().AndReturn(mock_client)
- self.m.StubOutWithMock(mock_client, "create")
- mock_client.create(**create_args).AndReturn(fake_dnsinstance)
- self.m.ReplayAll()
-
- def _stubout_update(
- self,
- instance,
- fake_dnsinstance,
- updateRecords=None,
- **update_args):
- mock_client = self.m.CreateMockAnything()
- self.m.StubOutWithMock(instance, 'cloud_dns')
- instance.cloud_dns().AndReturn(mock_client)
- self.m.StubOutWithMock(mock_client, "get")
- mock_domain = self.m.CreateMockAnything()
- mock_client.get(fake_dnsinstance.resource_id).AndReturn(mock_domain)
- self.m.StubOutWithMock(mock_domain, "update")
- mock_domain.update(**update_args).AndReturn(fake_dnsinstance)
- if updateRecords:
- fake_records = list()
- mock_domain.list_records().AndReturn(fake_records)
- mock_domain.add_records([{
- 'comment': None,
- 'priority': None,
- 'type': 'A',
- 'name': 'ftp.example.com',
- 'data': '192.0.2.8',
- 'ttl': 3600}])
- self.m.ReplayAll()
-
- def _get_create_args_with_comments(self, record):
- record_with_comment = [dict(record[0])]
- record_with_comment[0]["comment"] = None
- create_record_args = dict()
- create_record_args['records'] = record_with_comment
- create_args = dict(
- list(self.create_domain_only_args.items()) +
- list(create_record_args.items()))
- return create_args
-
- def test_create_domain_only(self):
- """Test domain create only without any records."""
- fake_dns_instance = FakeDnsInstance()
- t = template_format.parse(domain_only_template)
- instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
- create_args = self.create_domain_only_args
- self._stubout_create(instance, fake_dns_instance, **create_args)
- scheduler.TaskRunner(instance.create)()
- self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
- self.m.VerifyAll()
-
- def test_create_domain_with_a_record(self):
- """Test domain create with an A record.
-
- This should not have a priority field.
- """
- fake_dns_instance = FakeDnsInstance()
- t = template_format.parse(domain_only_template)
- a_record = [{
- "type": "A",
- "name": "ftp.example.com",
- "data": "192.0.2.8",
- "ttl": 3600
- }]
- t['Resources']['domain']['Properties']['records'] = a_record
- instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
- create_args = self._get_create_args_with_comments(a_record)
- self._stubout_create(instance, fake_dns_instance, **create_args)
- scheduler.TaskRunner(instance.create)()
- self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
- self.m.VerifyAll()
-
- def test_create_domain_with_mx_record(self):
- """Test domain create with an MX record.
-
- This should have a priority field.
- """
- fake_dns_instance = FakeDnsInstance()
- t = template_format.parse(domain_only_template)
- mx_record = [{
- "type": "MX",
- "name": "example.com",
- "data": "mail.example.com",
- "priority": 5,
- "ttl": 3600
- }]
- t['Resources']['domain']['Properties']['records'] = mx_record
- instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
- create_args = self._get_create_args_with_comments(mx_record)
- self._stubout_create(instance, fake_dns_instance, **create_args)
- scheduler.TaskRunner(instance.create)()
- self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
- self.m.VerifyAll()
-
- def test_check(self):
- t = template_format.parse(domain_only_template)
- instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)
-
- mock_get = mock.Mock()
- instance.cloud_dns = mock.Mock()
- instance.cloud_dns.return_value.get = mock_get
- scheduler.TaskRunner(instance.check)()
- self.assertEqual('CHECK', instance.action)
- self.assertEqual('COMPLETE', instance.status)
-
- mock_get.side_effect = cloud_dns.NotFound('boom')
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(instance.check))
- self.assertEqual('CHECK', instance.action)
- self.assertEqual('FAILED', instance.status)
- self.assertIn('boom', str(exc))
-
- def test_update(self, updateRecords=None):
- """Helper function for testing domain updates."""
- fake_dns_instance = FakeDnsInstance()
- t = template_format.parse(domain_only_template)
- instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)
- instance.resource_id = 4
- update_args = self.update_domain_only_args
- self._stubout_update(
- instance,
- fake_dns_instance,
- updateRecords,
- **update_args)
-
- uprops = dict(instance.properties)
- uprops.update({
- 'emailAddress': 'updatedEmail@example.com',
- 'ttl': 5555,
- 'comment': 'updated comment',
- })
- if updateRecords:
- uprops['records'] = updateRecords
- ut = rsrc_defn.ResourceDefinition(instance.name,
- instance.type(),
- uprops)
- instance.state_set(instance.CREATE, instance.COMPLETE)
- scheduler.TaskRunner(instance.update, ut)()
- self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
- self.m.VerifyAll()
-
- def test_update_domain_only(self):
- """Test domain update without any records."""
- self.test_update()
-
- def test_update_domain_with_a_record(self):
- """Test domain update with an A record."""
- a_record = [{'type': 'A',
- 'name': 'ftp.example.com',
- 'data': '192.0.2.8',
- 'ttl': 3600}]
- self.test_update(updateRecords=a_record)
-
- def test_update_record_only(self):
- """Helper function for testing domain updates."""
- fake_dns_instance = FakeDnsInstance()
- t = template_format.parse(domain_only_template)
- instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)
- instance.resource_id = 4
- update_records = [{'type': 'A',
- 'name': 'ftp.example.com',
- 'data': '192.0.2.8',
- 'ttl': 3600}]
-
- mock_client = self.m.CreateMockAnything()
- self.m.StubOutWithMock(instance, 'cloud_dns')
- instance.cloud_dns().AndReturn(mock_client)
- self.m.StubOutWithMock(mock_client, "get")
- mock_domain = self.m.CreateMockAnything()
- mock_client.get(fake_dns_instance.resource_id).AndReturn(mock_domain)
-
- # mock_domain.update shouldn't be called in this scenario, so
- # stub it out but don't record a call to it
- self.m.StubOutWithMock(mock_domain, "update")
-
- fake_records = list()
- mock_domain.list_records().AndReturn(fake_records)
- mock_domain.add_records([{
- 'comment': None,
- 'priority': None,
- 'type': 'A',
- 'name': 'ftp.example.com',
- 'data': '192.0.2.8',
- 'ttl': 3600}])
- self.m.ReplayAll()
-
- uprops = dict(instance.properties)
- uprops['records'] = update_records
- ut = rsrc_defn.ResourceDefinition(instance.name,
- instance.type(),
- uprops)
- instance.state_set(instance.CREATE, instance.COMPLETE)
-
- scheduler.TaskRunner(instance.update, ut)()
- self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
- self.m.VerifyAll()
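The tests above use the legacy mox API (CreateMockAnything, StubOutWithMock, ReplayAll/VerifyAll). For readers more familiar with the mock library, a rough equivalent of _stubout_create would look like this sketch (illustrative only, not the project's code):

import mock  # unittest.mock on Python 3

def stub_create(instance, fake_dnsinstance, **create_args):
    mock_client = mock.Mock()
    mock_client.create.return_value = fake_dnsinstance
    instance.cloud_dns = mock.Mock(return_value=mock_client)
    return mock_client

# After running the create task, the verification step would be:
#     mock_client.create.assert_called_once_with(**create_args)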
diff --git a/contrib/rackspace/requirements.txt b/contrib/rackspace/requirements.txt
deleted file mode 100644
index 5d197dd52..000000000
--- a/contrib/rackspace/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
--e git+https://github.com/rackerlabs/heat-pyrax.git#egg=pyrax
diff --git a/contrib/rackspace/setup.cfg b/contrib/rackspace/setup.cfg
deleted file mode 100644
index 04d62be99..000000000
--- a/contrib/rackspace/setup.cfg
+++ /dev/null
@@ -1,43 +0,0 @@
-[metadata]
-name = heat-contrib-rackspace
-summary = Heat resources for working with the Rackspace Cloud
-description-file =
- README.md
-author = OpenStack
-author-email = openstack-dev@lists.openstack.org
-home-page = http://docs.openstack.org/developer/heat/
-classifier =
- Environment :: OpenStack
- Intended Audience :: Information Technology
- Intended Audience :: System Administrators
- License :: OSI Approved :: Apache Software License
- Operating System :: POSIX :: Linux
- Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
-
-[files]
-packages =
- rackspace
-
-# Copy to /usr/lib/heat for non-stevedore plugin loading
-data_files =
- lib/heat/rackspace = rackspace/resources/*
- lib/heat/heat_keystoneclient_v2 = heat_keystoneclient_v2/*
-
-[entry_points]
-
-heat.clients =
- auto_scale = rackspace.clients:RackspaceAutoScaleClient
- cinder = rackspace.clients:RackspaceCinderClient
- cloud_dns = rackspace.clients:RackspaceCloudDNSClient
- cloud_lb = rackspace.clients:RackspaceCloudLBClient
- cloud_networks = rackspace.clients:RackspaceCloudNetworksClient
- glance = rackspace.clients:RackspaceGlanceClient
- nova = rackspace.clients:RackspaceNovaClient
- trove = rackspace.clients:RackspaceTroveClient
- swift = rackspace.clients:RackspaceSwiftClient
-
-[global]
-setup-hooks =
- pbr.hooks.setup_hook
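For context, the heat.clients entry points declared above are what stevedore discovers at runtime; the data_files comment covers the fallback path for non-stevedore loading. A minimal sketch of the stevedore side (names taken from the removed setup.cfg):

from stevedore import extension

# Discover all client plugins registered under the heat.clients namespace.
mgr = extension.ExtensionManager(namespace='heat.clients',
                                 invoke_on_load=False)
print(sorted(mgr.names()))  # e.g. ['auto_scale', 'cinder', 'cloud_dns', ...]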
diff --git a/contrib/rackspace/setup.py b/contrib/rackspace/setup.py
deleted file mode 100644
index 736375744..000000000
--- a/contrib/rackspace/setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
-setuptools.setup(
- setup_requires=['pbr'],
- pbr=True)
diff --git a/devstack/lib/heat b/devstack/lib/heat
index cceb7c384..caed12f98 100644
--- a/devstack/lib/heat
+++ b/devstack/lib/heat
@@ -5,7 +5,7 @@
# To enable, add the following to localrc
#
-# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
+# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-eng
# Dependencies:
# (none)
@@ -45,7 +45,6 @@ HEAT_USE_APACHE=${HEAT_USE_APACHE:-${HEAT_USE_MOD_WSGI:-True}}
HEAT_DIR=$DEST/heat
HEAT_FILES_DIR=$HEAT_DIR/heat/httpd/files
-HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE)
HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON)
HEAT_CONF_DIR=/etc/heat
@@ -63,10 +62,8 @@ HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default}
HEAT_BIN_DIR=$(get_python_exec_prefix)
HEAT_API_UWSGI_CONF=$HEAT_CONF_DIR/heat-api-uwsgi.ini
HEAT_CFN_API_UWSGI_CONF=$HEAT_CONF_DIR/heat-api-cfn-uwsgi.ini
-HEAT_CW_API_UWSGI_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch-uwsgi.ini
HEAT_API_UWSGI=$HEAT_BIN_DIR/heat-wsgi-api
HEAT_CFN_API_UWSGI=$HEAT_BIN_DIR/heat-wsgi-api-cfn
-HEAT_CW_API_UWSGI=$HEAT_BIN_DIR/heat-wsgi-api-cloudwatch
# other default options
if [[ "$HEAT_STANDALONE" == "True" ]]; then
@@ -102,7 +99,6 @@ function cleanup_heat {
if [[ "$HEAT_USE_APACHE" == "True" ]]; then
_cleanup_heat_apache_wsgi
fi
- sudo rm -rf $HEAT_AUTH_CACHE_DIR
sudo rm -rf $HEAT_ENV_DIR
sudo rm -rf $HEAT_TEMPLATES_DIR
sudo rm -rf $HEAT_CONF_DIR
@@ -119,24 +115,18 @@ function configure_heat {
HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000}
HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST}
HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
- HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP}
- HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003}
HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini
- HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json
cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE
- cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE
# common options
iniset_rpc_backend heat $HEAT_CONF
if [[ "$HEAT_USE_APACHE" == "True" && "$WSGI_MODE" == "uwsgi" ]]; then
iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST/heat-api-cfn
iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST/heat-api-cfn/v1/waitcondition
- iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST/heat-api-cloudwatch
else
iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
- iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
fi
iniset $HEAT_CONF database connection `database_connection_url heat`
@@ -169,7 +159,6 @@ function configure_heat {
# approximation.
iniset "$HEAT_API_UWSGI_CONF" uwsgi threads 4
write_uwsgi_config "$HEAT_CFN_API_UWSGI_CONF" "$HEAT_CFN_API_UWSGI" "/heat-api-cfn"
- write_uwsgi_config "$HEAT_CW_API_UWSGI_CONF" "$HEAT_CW_API_UWSGI" "/heat-api-cloudwatch"
else
_config_heat_apache_wsgi
fi
@@ -179,7 +168,7 @@ function configure_heat {
iniset $HEAT_CONF paste_deploy flavor standalone
iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
else
- configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR
+ configure_auth_token_middleware $HEAT_CONF heat
fi
# If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure
@@ -202,9 +191,6 @@ function configure_heat {
# Cloudformation API
iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT
- # Cloudwatch API
- iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
-
if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
fi
@@ -276,14 +262,8 @@ function init_heat {
recreate_database heat
$HEAT_BIN_DIR/heat-manage db_sync
fi
- create_heat_cache_dir
}
-# create_heat_cache_dir() - Part of the init_heat() process
-function create_heat_cache_dir {
- # Create cache dirs
- sudo install -d -o $STACK_USER $HEAT_AUTH_CACHE_DIR
-}
# install_heatclient() - Collect source and prepare
function install_heatclient {
@@ -318,29 +298,24 @@ function start_heat {
if [[ -f ${enabled_site_file} && "$WSGI_MODE" != "uwsgi" ]]; then
enable_apache_site heat-api
enable_apache_site heat-api-cfn
- enable_apache_site heat-api-cloudwatch
restart_apache_server
tail_log heat-api /var/log/$APACHE_NAME/heat_api.log
tail_log heat-api-access /var/log/$APACHE_NAME/heat_api_access.log
tail_log heat-api-cfn /var/log/$APACHE_NAME/heat_api_cfn.log
tail_log heat-api-cfn-access /var/log/$APACHE_NAME/heat_api_cfn_access.log
- tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat_api_cloudwatch.log
- tail_log heat-api-cloudwatch-access /var/log/$APACHE_NAME/heat_api_cloudwatch_access.log
else
run_process h-api "$HEAT_BIN_DIR/uwsgi --ini $HEAT_API_UWSGI_CONF" ""
run_process h-api-cfn "$HEAT_BIN_DIR/uwsgi --ini $HEAT_CFN_API_UWSGI_CONF" ""
- run_process h-api-cw "$HEAT_BIN_DIR/uwsgi --ini $HEAT_CW_API_UWSGI_CONF" ""
fi
else
run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
- run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
fi
}
function _stop_processes {
local serv
- for serv in h-api h-api-cfn h-api-cw; do
+ for serv in h-api h-api-cfn; do
stop_process $serv
done
}
@@ -356,7 +331,6 @@ function stop_heat {
else
disable_apache_site heat-api
disable_apache_site heat-api-cfn
- disable_apache_site heat-api-cloudwatch
restart_apache_server
fi
else
@@ -364,16 +338,22 @@ function stop_heat {
fi
}
+# TODO(ramishra): Remove after Queens
+function stop_cw_service {
+ if $SYSTEMCTL is-enabled devstack@h-api-cw.service; then
+ $SYSTEMCTL stop devstack@h-api-cw.service
+ $SYSTEMCTL disable devstack@h-api-cw.service
+ fi
+}
+
# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function _cleanup_heat_apache_wsgi {
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
remove_uwsgi_config "$HEAT_API_UWSGI_CONF" "$HEAT_API_UWSGI"
remove_uwsgi_config "$HEAT_CFN_API_UWSGI_CONF" "$HEAT_CFN_API_UWSGI"
- remove_uwsgi_config "$HEAT_CW_API_UWSGI_CONF" "$HEAT_CW_API_UWSGI"
fi
sudo rm -f $(apache_site_config_for heat-api)
sudo rm -f $(apache_site_config_for heat-api-cfn)
- sudo rm -f $(apache_site_config_for heat-api-cloudwatch)
}
# _config_heat_apache_wsgi() - Set WSGI config files of Heat
@@ -383,14 +363,11 @@ function _config_heat_apache_wsgi {
heat_apache_conf=$(apache_site_config_for heat-api)
local heat_cfn_apache_conf
heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn)
- local heat_cloudwatch_apache_conf
- heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch)
local heat_ssl=""
local heat_certfile=""
local heat_keyfile=""
local heat_api_port=$HEAT_API_PORT
local heat_cfn_api_port=$HEAT_API_CFN_PORT
- local heat_cw_api_port=$HEAT_API_CW_PORT
local venv_path=""
sudo cp $HEAT_FILES_DIR/heat-api.conf $heat_apache_conf
@@ -419,18 +396,6 @@ function _config_heat_apache_wsgi {
s|%VIRTUALENV%|$venv_path|g
" -i $heat_cfn_apache_conf
- sudo cp $HEAT_FILES_DIR/heat-api-cloudwatch.conf $heat_cloudwatch_apache_conf
- sudo sed -e "
- s|%PUBLICPORT%|$heat_cw_api_port|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
- s|%API_WORKERS%|$API_WORKERS|g;
- s|%SSLENGINE%|$heat_ssl|g;
- s|%SSLCERTFILE%|$heat_certfile|g;
- s|%SSLKEYFILE%|$heat_keyfile|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- " -i $heat_cloudwatch_apache_conf
}
diff --git a/devstack/settings b/devstack/settings
index beaa5ae4c..f185d2a65 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -6,5 +6,3 @@
enable_service h-eng
enable_service h-api
enable_service h-api-cfn
-enable_service h-api-cw
-
diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh
index 4a2cf263c..6057324e7 100755
--- a/devstack/upgrade/resources.sh
+++ b/devstack/upgrade/resources.sh
@@ -36,11 +36,27 @@ function _heat_set_user {
OS_PROJECT_DOMAIN_ID=$DEFAULT_DOMAIN
}
-function _run_heat_api_tests {
+function _write_heat_integrationtests {
+ local upgrade_tests=$1
+ cat > $upgrade_tests <<EOF
+heat_tempest_plugin.tests.api
+heat_integrationtests.functional.test_autoscaling
+heat_integrationtests.functional.test_cancel_update
+heat_integrationtests.functional.test_create_update
+heat_integrationtests.functional.test_instance_group
+heat_integrationtests.functional.test_resource_group.ResourceGroupTest
+heat_integrationtests.functional.test_resource_group.ResourceGroupUpdatePolicyTest
+heat_integrationtests.functional.test_software_deployment_group
+heat_integrationtests.functional.test_validation
+heat_tempest_plugin.tests.functional.test_software_config.ParallelDeploymentsTest
+heat_tempest_plugin.tests.functional.test_nova_server_networks
+EOF
+}
+
+function _run_heat_integrationtests {
local devstack_dir=$1
pushd $devstack_dir/../tempest
- sed -i -e '/group_regex/c\group_regex=heat_integrationtests\\.api\\.test_heat_api(?:\\.|_)([^_]+)' .testr.conf
conf_file=etc/tempest.conf
iniset_multiline $conf_file service_available heat_plugin True
iniset $conf_file heat_plugin username $OS_USERNAME
@@ -53,14 +69,27 @@ function _run_heat_api_tests {
iniset $conf_file heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME
iniset $conf_file heat_plugin region $OS_REGION_NAME
iniset $conf_file heat_plugin auth_version $OS_IDENTITY_API_VERSION
- tempest run --regex heat_integrationtests.api
+
+ export DEST=$(dirname $devstack_dir)
+ $DEST/heat/heat_integrationtests/prepare_test_env.sh
+ $DEST/heat/heat_integrationtests/prepare_test_network.sh
+
+ # Run set of specified functional tests
+ UPGRADE_TESTS=upgrade_tests.list
+ _write_heat_integrationtests $UPGRADE_TESTS
+
+ tox -evenv-tempest -- stestr --test-path=$DEST/heat/heat_integrationtests --top-dir=$DEST/heat \
+ --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' \
+ run --whitelist-file $UPGRADE_TESTS
+ _heat_set_user
popd
}
function create {
- # run heat api tests instead of tempest smoke before create
- _run_heat_api_tests $BASE_DEVSTACK_DIR
+ # run heat integration tests instead of tempest smoke before create
+ _run_heat_integrationtests $BASE_DEVSTACK_DIR
+ source $TOP_DIR/openrc admin admin
# creates a tenant for the server
eval $(openstack project create -f shell -c id $HEAT_PROJECT)
if [[ -z "$id" ]]; then
@@ -94,7 +123,7 @@ function verify {
_heat_set_user
local side="$1"
if [[ "$side" = "post-upgrade" ]]; then
- _run_heat_api_tests $TARGET_DEVSTACK_DIR
+ _run_heat_integrationtests $TARGET_DEVSTACK_DIR
fi
stack_name=$(resource_get heat stack_name)
heat stack-show $stack_name
diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings
index 23f946e54..2b774b757 100644
--- a/devstack/upgrade/settings
+++ b/devstack/upgrade/settings
@@ -1,6 +1,8 @@
register_project_for_upgrade heat
register_db_to_save heat
-devstack_localrc base enable_service h-api h-api-cfn h-api-cw h-eng heat tempest
-devstack_localrc target enable_service h-api h-api-cfn h-api-cw h-eng heat tempest
+devstack_localrc base enable_service h-api h-api-cfn h-eng heat tempest
+devstack_localrc target enable_service h-api h-api-cfn h-eng heat tempest
BASE_RUN_SMOKE=False
TARGET_RUN_SMOKE=False
+
+export HOST_TOPOLOGY=${HOST_TOPOLOGY}
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh
index 2e429a3f5..dfe566cee 100755
--- a/devstack/upgrade/shutdown.sh
+++ b/devstack/upgrade/shutdown.sh
@@ -30,7 +30,11 @@ set -o xtrace
stop_heat
-SERVICES_DOWN="heat-api heat-engine heat-api-cfn heat-api-cloudwatch"
+# stop cloudwatch service if running
+# TODO(ramishra): Remove it after Queens
+stop_cw_service
+
+SERVICES_DOWN="heat-api heat-engine heat-api-cfn"
# sanity check that services are actually down
ensure_services_stopped $SERVICES_DOWN
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index 905d48a5f..f73639fec 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -58,6 +58,7 @@ source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
source $TARGET_DEVSTACK_DIR/lib/stack
source $TARGET_DEVSTACK_DIR/lib/apache
+source $TARGET_DEVSTACK_DIR/lib/rpc_backend
# Get heat functions from devstack plugin
source $HEAT_DEVSTACK_DIR/lib/heat
@@ -72,12 +73,17 @@ set -o xtrace
# Install the target heat
source $HEAT_DEVSTACK_DIR/plugin.sh stack install
+# Change transport-url in the host which runs upgrade script (primary)
+if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then
+ vhost="newvhost"
+ rpc_backend_add_vhost $vhost
+ iniset_rpc_backend heat $HEAT_CONF DEFAULT $vhost
+fi
+
# calls upgrade-heat for specific release
upgrade_project heat $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
# Simulate init_heat()
-create_heat_cache_dir
-
HEAT_BIN_DIR=$(dirname $(which heat-manage))
$HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync || die $LINENO "DB sync error"
@@ -86,7 +92,7 @@ start_heat
# Don't succeed unless the services come up
# Truncating some service names to 11 characters
-ensure_services_started heat-api heat-engine heat-api-cl heat-api-cf
+ensure_services_started heat-api heat-engine heat-api-cf
set +o xtrace
echo "*********************************************************************"
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b12725136..bfdcfc3db 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -78,10 +78,21 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'openstackdocstheme',
+ 'oslo_config.sphinxconfiggen',
+ 'oslo_policy.sphinxext',
+ 'oslo_policy.sphinxpolicygen',
'ext.resources',
'ext.tablefromtext',
'stevedore.sphinxext']
+# policy sample file generation
+policy_generator_config_file = '../../etc/heat/heat-policy-generator.conf'
+sample_policy_basename = '_static/heat'
+
+# oslo_config.sphinxconfiggen options
+config_generator_config_file = '../../config-generator.conf'
+sample_config_basename = '_static/heat'
+
# openstackdocstheme options
repository_name = 'openstack/heat'
bug_project = 'heat'
@@ -180,7 +191,7 @@ html_theme_options = {"sidebar_mode": "toc"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
+html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
@@ -290,9 +301,6 @@ man_pages = [
('man/heat-api-cfn', 'heat-api-cfn',
u'CloudFormation compatible API service to the heat project.',
[u'Heat Developers'], 1),
- ('man/heat-api-cloudwatch', 'heat-api-cloudwatch',
- u'CloudWatch alike API service to the heat project',
- [u'Heat Developers'], 1),
('man/heat-db-setup', 'heat-db-setup',
u'Command line utility to setup the Heat database',
[u'Heat Developers'], 1),
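A note on the sphinx extensions added above: at build time, oslo_config.sphinxconfiggen and oslo_policy.sphinxpolicygen run the oslo generators against the referenced config files and write the samples under _static/ (per the *_basename options). Roughly equivalent to invoking the config generator by hand (a sketch; the path is the one conf.py references):

from oslo_config import generator

# Equivalent of `oslo-config-generator --config-file config-generator.conf`;
# the generator config names the namespaces and output file, and the sphinx
# extension directs the result to _static/heat.conf.sample.
generator.main(['--config-file', '../../config-generator.conf'])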
diff --git a/doc/source/configuration/api.rst b/doc/source/configuration/api.rst
index 2b4f718e0..2b1ced2a5 100644
--- a/doc/source/configuration/api.rst
+++ b/doc/source/configuration/api.rst
@@ -7,10 +7,9 @@ Configuration options
The following options allow configuration of the APIs that Orchestration
supports. Currently this includes compatibility APIs for CloudFormation
-and CloudWatch and a native API.
+and a native API.
.. include:: ./tables/heat-api.rst
.. include:: ./tables/heat-cfn_api.rst
-.. include:: ./tables/heat-cloudwatch_api.rst
.. include:: ./tables/heat-metadata_api.rst
.. include:: ./tables/heat-waitcondition_api.rst
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
index 57eb31798..72376426d 100644
--- a/doc/source/configuration/index.rst
+++ b/doc/source/configuration/index.rst
@@ -9,3 +9,5 @@ Configuring Heat
clients.rst
config-options.rst
logs.rst
+ sample_config.rst
+ sample_policy.rst
\ No newline at end of file
diff --git a/doc/source/configuration/sample_config.rst b/doc/source/configuration/sample_config.rst
new file mode 100644
index 000000000..f572f4844
--- /dev/null
+++ b/doc/source/configuration/sample_config.rst
@@ -0,0 +1,12 @@
+=========================
+Heat Configuration Sample
+=========================
+
+The following is a sample heat configuration for adaptation and use. It is
+auto-generated from heat when this documentation is built, so if you are
+having issues with an option, please compare your version of heat with the
+version of this documentation.
+
+The sample configuration can also be downloaded in `file form <../_static/heat.conf.sample>`_.
+
+.. literalinclude:: ../_static/heat.conf.sample
diff --git a/doc/source/configuration/sample_policy.rst b/doc/source/configuration/sample_policy.rst
new file mode 100644
index 000000000..78814abe8
--- /dev/null
+++ b/doc/source/configuration/sample_policy.rst
@@ -0,0 +1,18 @@
+==================
+Heat Sample Policy
+==================
+
+The following is a sample heat policy file that has been auto-generated
+from default policy values in code. If you're using the default policies, then
+the maintenance of this file is not necessary, and it should not be copied into
+a deployment. Doing so will result in duplicate policy definitions. It is here
+to help explain which policy operations protect specific heat APIs, but
+copying it into a deployment is not suggested unless you're planning on
+providing a different policy for an operation that is not the default.
+
+If you wish to build a policy file, you can also use ``tox -e genpolicy`` to
+generate it.
+
+The sample policy file can also be downloaded in `file form <../_static/heat.policy.yaml.sample>`_.
+
+.. literalinclude:: ../_static/heat.policy.yaml.sample
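The defaults that this sample is generated from live in code rather than in a policy file. A minimal sketch of the registration pattern the oslo.policy generators consume (illustrative names only; heat's real defaults live in its policies package):

from oslo_policy import policy

# Illustrative defaults; oslopolicy-sample-generator renders entries like
# these into heat.policy.yaml.sample.
rules = [
    policy.RuleDefault('deny_stack_user', 'not role:heat_stack_user'),
    policy.DocumentedRuleDefault(
        name='stacks:create',
        check_str='rule:deny_stack_user',
        description='Create stack.',
        operations=[{'path': '/v1/{tenant_id}/stacks', 'method': 'POST'}]),
]

def list_rules():
    # Entry point consumed by the oslo.policy generators.
    return rules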
diff --git a/doc/source/configuration/tables/heat-api.rst b/doc/source/configuration/tables/heat-api.rst
index fdb56e80b..a0d0ae034 100644
--- a/doc/source/configuration/tables/heat-api.rst
+++ b/doc/source/configuration/tables/heat-api.rst
@@ -110,8 +110,6 @@
- (Boolean) Whether the application is behind a proxy or not. This determines if the middleware should parse the headers or not.
* - ``max_request_body_size`` = ``114688``
- (Integer) The maximum body size for each request, in bytes.
- * - ``secure_proxy_ssl_header`` = ``X-Forwarded-Proto``
- - (String) DEPRECATED: The HTTP Header that will be used to determine what the original request protocol scheme was, even if it was hidden by a SSL termination proxy.
* - **[oslo_versionedobjects]**
-
* - ``fatal_exception_format_errors`` = ``False``
diff --git a/doc/source/configuration/tables/heat-cloudwatch_api.rst b/doc/source/configuration/tables/heat-cloudwatch_api.rst
deleted file mode 100644
index e1594b885..000000000
--- a/doc/source/configuration/tables/heat-cloudwatch_api.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-..
- Warning: Do not edit this file. It is automatically generated from the
- software project's code and your changes will be overwritten.
-
- The tool to generate this file lives in openstack-doc-tools repository.
-
- Please make any changes needed in the code, then run the
- autogenerate-config-doc tool from the openstack-doc-tools repository, or
- ask for help on the documentation mailing list, IRC channel or meeting.
-
-.. _heat-cloudwatch_api:
-
-.. list-table:: Description of CloudWatch API configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``enable_cloud_watch_lite`` = ``False``
- - (Boolean) Enable the legacy OS::Heat::CWLiteAlarm resource.
- * - ``heat_watch_server_url`` =
- - (String) URL of the Heat CloudWatch server.
- * - **[heat_api_cloudwatch]**
- -
- * - ``backlog`` = ``4096``
- - (Integer) Number of backlog requests to configure the socket with.
- * - ``bind_host`` = ``0.0.0.0``
- - (IP) Address to bind the server. Useful when selecting a particular network interface.
- * - ``bind_port`` = ``8003``
- - (Port number) The port on which the server will listen.
- * - ``cert_file`` = ``None``
- - (String) Location of the SSL certificate file to use for SSL mode.
- * - ``key_file`` = ``None``
- - (String) Location of the SSL key file to use for enabling SSL mode.
- * - ``max_header_line`` = ``16384``
- - (Integer) Maximum line size of message headers to be accepted. max_header_line may need to be increased when using large tokens (typically those generated by the Keystone v3 API with big service catalogs.)
- * - ``tcp_keepidle`` = ``600``
- - (Integer) The value for the socket option TCP_KEEPIDLE. This is the time in seconds that the connection must be idle before TCP starts sending keepalive probes.
- * - ``workers`` = ``1``
- - (Integer) Number of workers for Heat service.
diff --git a/doc/source/configuration/tables/heat-common.rst b/doc/source/configuration/tables/heat-common.rst
index 77aefe0b7..e59b9aa05 100644
--- a/doc/source/configuration/tables/heat-common.rst
+++ b/doc/source/configuration/tables/heat-common.rst
@@ -73,7 +73,7 @@
* - **[heat_all]**
-
* - ``enabled_services`` = ``engine, api, api_cfn``
- - (List) Specifies the heat services that are enabled when running heat-all. Valid options are all or any combination of api, engine, api_cfn, or api_cloudwatch.
+ - (List) Specifies the heat services that are enabled when running heat-all. Valid options are all or any combination of api, engine, or api_cfn.
* - **[profiler]**
-
* - ``connection_string`` = ``messaging://``
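The heat_all option described in the table above would be declared roughly like this (a sketch, assuming the oslo.config conventions used elsewhere in heat):

from oslo_config import cfg

heat_all_opts = [
    cfg.ListOpt('enabled_services',
                default=['engine', 'api', 'api_cfn'],
                help='Specifies the heat services that are enabled when '
                     'running heat-all. Valid options are all or any '
                     'combination of api, engine, or api_cfn.'),
]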
diff --git a/doc/source/contributing/blueprints.rst b/doc/source/contributing/blueprints.rst
index d01d5f89f..c95505b1a 100644
--- a/doc/source/contributing/blueprints.rst
+++ b/doc/source/contributing/blueprints.rst
@@ -91,5 +91,5 @@ Lite spec from existing bugs
----------------------------
If there's an already existing bug that describes a small feature suitable for
-a spec-lite, add a `spec-lite' tag to the bug. There is no need to create a new
+a spec-lite, add a `spec-lite` tag to the bug. There is no need to create a new
bug. The comments and history of the existing bug are important for its review.
diff --git a/doc/source/ext/resources.py b/doc/source/ext/resources.py
index 02a53d403..b31ad965f 100644
--- a/doc/source/ext/resources.py
+++ b/doc/source/ext/resources.py
@@ -17,8 +17,8 @@ import pydoc
from docutils import core
from docutils import nodes
+from docutils.parsers import rst
import six
-from sphinx.util import compat
from heat.common.i18n import _
from heat.engine import attributes
@@ -53,7 +53,7 @@ class contribresourcepages(nodes.General, nodes.Element):
pass
-class ResourcePages(compat.Directive):
+class ResourcePages(rst.Directive):
has_content = False
required_arguments = 0
optional_arguments = 1
@@ -203,7 +203,7 @@ resources:
return (x_status > y_status) - (x_status < y_status)
def contribute_property(self, parent, prop_key, prop, upd_para=None,
- id_pattern_prefix=None):
+ id_pattern_prefix=None, sub_prop=False):
if not id_pattern_prefix:
id_pattern_prefix = '%s-prop'
id_pattern = id_pattern_prefix + '-' + prop_key
@@ -218,6 +218,15 @@ resources:
definition.append(note)
return
+ if sub_prop and prop.type != properties.Schema.LIST and prop.type\
+ != properties.Schema.MAP:
+ if prop.required:
+ para = nodes.paragraph('', _('Required.'))
+ definition.append(para)
+ else:
+ para = nodes.paragraph('', _('Optional.'))
+ definition.append(para)
+
if prop.description:
para = nodes.paragraph('', prop.description)
definition.append(para)
@@ -272,7 +281,8 @@ resources:
indent = nodes.block_quote()
definition.append(indent)
self.contribute_property(
- indent, _key, _prop, upd_para, id_pattern)
+ indent, _key, _prop, upd_para, id_pattern,
+ sub_prop=True)
def contribute_properties(self, parent):
if not self.props_schemata:
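For reference, sphinx.util.compat.Directive was removed from newer Sphinx releases; docutils' rst.Directive is a drop-in base class with the same interface. A minimal sketch of the directive contract the ResourcePages classes rely on (hypothetical directive, not the real one):

from docutils import nodes
from docutils.parsers import rst

class MinimalDirective(rst.Directive):  # hypothetical example directive
    has_content = False
    required_arguments = 0
    optional_arguments = 1

    def run(self):
        # Directives return a list of docutils nodes to splice into the tree.
        return [nodes.paragraph('', 'rendered by MinimalDirective')]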
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index 28f021885..6d05405a2 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -168,7 +168,7 @@
configure instances at boot time. See also `User data (OpenStack
End User Guide)`_.
- .. _User data (OpenStack End User Guide): http://docs.openstack.org/user-guide/cli_provide_user_data_to_instances.html
+ .. _User data (OpenStack End User Guide): https://docs.openstack.org/nova/latest/user/user-data.html
.. _cloud-init: https://help.ubuntu.com/community/CloudInit
Wait condition
diff --git a/doc/source/index.rst b/doc/source/index.rst
index b6dd616d6..1874570ce 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -113,7 +113,6 @@ API Documentation
.. _`Heat REST API Reference (OpenStack API Complete Reference - Orchestration)`: http://developer.openstack.org/api-ref/orchestration/v1/
-
Code Documentation
==================
.. toctree::
diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst
index b8f63128a..0f772cd2b 100644
--- a/doc/source/install/get_started.rst
+++ b/doc/source/install/get_started.rst
@@ -29,9 +29,6 @@ The Orchestration service consists of the following components:
An AWS Query API that is compatible with AWS CloudFormation. It
processes API requests by sending them to the ``heat-engine`` over RPC.
-``heat-api-cloudwatch`` component
- A CloudWatch-like API service to the heat project.
-
``heat-engine``
Orchestrates the launching of templates and provides events back to
the API consumer.
diff --git a/doc/source/man/heat-api-cloudwatch.rst b/doc/source/man/heat-api-cloudwatch.rst
deleted file mode 100644
index ed2f68914..000000000
--- a/doc/source/man/heat-api-cloudwatch.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-===================
-heat-api-cloudwatch
-===================
-
-.. program:: heat-api-cloudwatch
-
-SYNOPSIS
-========
-``heat-api-cloudwatch [options]``
-
-DESCRIPTION
-===========
-heat-api-cloudwatch is a CloudWatch-like API service to the heat project.
-
-OPTIONS
-=======
-.. cmdoption:: --config-file
-
- Path to a config file to use. Multiple config files can be specified, with
- values in later files taking precedence.
-
-
-.. cmdoption:: --config-dir
-
- Path to a config directory to pull .conf files from. This file set is
- sorted, so as to provide a predictable parse order if individual options are
- over-ridden. The set is parsed after the file(s), if any, specified via
- --config-file, hence over-ridden options in the directory take precedence.
-
-.. cmdoption:: --version
-
- Show program's version number and exit. The output could be empty if
- the distribution didn't specify any version information.
-
-FILES
-========
-
-* /etc/heat/heat.conf
diff --git a/doc/source/man/heat-manage.rst b/doc/source/man/heat-manage.rst
index f9e19faf9..e9ad40a7d 100644
--- a/doc/source/man/heat-manage.rst
+++ b/doc/source/man/heat-manage.rst
@@ -23,7 +23,7 @@ Run with -h to see a list of available commands:
``heat-manage -h``
Commands are ``db_version``, ``db_sync``, ``purge_deleted``,
-``migrate_covergence_1``, ``migrate_properties_data``, and
+``migrate_convergence_1``, ``migrate_properties_data``, and
``service``. Detailed descriptions are below.
``heat-manage db_version``
diff --git a/doc/source/man/index.rst b/doc/source/man/index.rst
index f2a2055c7..1dbea361e 100644
--- a/doc/source/man/index.rst
+++ b/doc/source/man/index.rst
@@ -12,7 +12,6 @@ Heat services
heat-engine
heat-api
heat-api-cfn
- heat-api-cloudwatch
--------------
Heat utilities
diff --git a/doc/source/operating_guides/httpd.rst b/doc/source/operating_guides/httpd.rst
index 75347c635..1f3eae327 100644
--- a/doc/source/operating_guides/httpd.rst
+++ b/doc/source/operating_guides/httpd.rst
@@ -21,13 +21,11 @@ On Debian/Ubuntu systems it is::
/etc/apache2/sites-available/heat-api.conf
/etc/apache2/sites-available/heat-api-cfn.conf
- /etc/apache2/sites-available/heat-api-cloudwatch.conf
On Red Hat based systems it is::
/etc/httpd/conf.d/uwsgi-heat-api.conf
/etc/httpd/conf.d/uwsgi-heat-api-cfn.conf
- /etc/httpd/conf.d/uwsgi-heat-api-cloudwatch.conf
uwsgi
-----
@@ -42,20 +40,18 @@ other services too) and just need to restart the local uwsgi daemons.
The httpd/files directory contains sample files for configuring httpd to run
Heat api services under uwsgi in this configuration. To use the sample configs
-simply copy `uwsgi-heat-api.conf`, `uwsgi-heat-api-cfn.conf` and
-`uwsgi-heat-api-cloudwatch.conf` to the appropriate location for your web server.
+simply copy `uwsgi-heat-api.conf` and `uwsgi-heat-api-cfn.conf` to the
+appropriate location for your web server.
On Debian/Ubuntu systems it is::
/etc/apache2/sites-available/uwsgi-heat-api.conf
/etc/apache2/sites-available/uwsgi-heat-api-cfn.conf
- /etc/apache2/sites-available/uwsgi-heat-api-cloudwatch.conf
On Red Hat based systems it is::
/etc/httpd/conf.d/uwsgi-heat-api.conf
/etc/httpd/conf.d/uwsgi-heat-api-cfn.conf
- /etc/httpd/conf.d/uwsgi-heat-api-cloudwatch.conf
Enable mod_proxy by running ``sudo a2enmod proxy``
@@ -65,7 +61,6 @@ Red Hat based systems)::
ln -s /etc/apache2/sites-available/uwsgi-heat-api.conf /etc/apache2/sites-enabled
ln -s /etc/apache2/sites-available/uwsgi-heat-api-cfn.conf /etc/apache2/sites-enabled
- ln -s /etc/apache2/sites-available/uwsgi-heat-api-cloudwatch.conf /etc/apache2/sites-enabled
Start or restart httpd to pick up the new configuration.
@@ -74,7 +69,6 @@ files to `/etc/heat`::
heat-api-uwsgi.ini
heat-api-cfn-uwsgi.ini
- heat-api-cloudwatch-uwsgi.ini
Update the files to match your system configuration (for example, you'll
want to set the number of processes and threads).
@@ -84,7 +78,6 @@ Install uwsgi and start the heat-api server using uwsgi::
sudo pip install uwsgi
uwsgi --ini /etc/heat/heat-api-uwsgi.ini
uwsgi --ini /etc/heat/heat-api-cfn-uwsgi.ini
- uwsgi --ini /etc/heat/heat-api-cloudwatch-uwsgi.ini
.. NOTE::
diff --git a/doc/source/template_guide/hot_spec.rst b/doc/source/template_guide/hot_spec.rst
index d01864d38..787103f46 100644
--- a/doc/source/template_guide/hot_spec.rst
+++ b/doc/source/template_guide/hot_spec.rst
@@ -290,16 +290,16 @@ for the ``heat_template_version`` key:
or
2017-09-01 | pike
--------------------
+-----------------
The key with value ``2017-09-01`` or ``pike`` indicates that the YAML
document is a HOT template and it may contain features added and/or removed
up until the Pike release. This version adds the ``make_url`` function for
assembling URLs, the ``list_concat`` function for combining multiple
lists, the ``list_concat_unique`` function for combining multiple
- lists without repeating items, the``string_replace_vstrict`` which
- raises errors for missing and empty params, and the ``contains`` which
- checks whether specific value is in a sequence. The complete list of
- supported functions is::
+ lists without repeating items, the ``string_replace_vstrict`` function
+ which raises errors for missing and empty params, and the ``contains``
+ function which checks whether a specific value is in a sequence. The
+ complete list of supported functions is::
digest
filter
@@ -1599,7 +1599,7 @@ rather than later when processing a template.
str_replace_vstrict
-------------------
+-------------------
``str_replace_vstrict`` behaves identically to the
``str_replace_strict`` function, only an error is raised if any of the
params are empty. This may help catch issues (i.e., prevent
diff --git a/etc/heat/api-paste.ini b/etc/heat/api-paste.ini
index 986a4a252..ad5b3112a 100644
--- a/etc/heat/api-paste.ini
+++ b/etc/heat/api-paste.ini
@@ -38,15 +38,6 @@ pipeline = cors http_proxy_to_wsgi cfnversionnegotiation osprofiler ec2authtoken
[pipeline:heat-api-cfn-standalone]
pipeline = cors http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
-# heat-api-cloudwatch pipeline
-[pipeline:heat-api-cloudwatch]
-pipeline = cors versionnegotiation osprofiler ec2authtoken authtoken context apicwapp
-
-# heat-api-cloudwatch pipeline for standalone heat
-# relies exclusively on authenticating with ec2 signed requests
-[pipeline:heat-api-cloudwatch-standalone]
-pipeline = cors versionnegotiation ec2authtoken context apicwapp
-
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.openstack.v1:API
@@ -55,10 +46,6 @@ heat.app_factory = heat.api.openstack.v1:API
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
-[app:apicwapp]
-paste.app_factory = heat.common.wsgi:app_factory
-heat.app_factory = heat.api.cloudwatch:API
-
[filter:versionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:version_negotiation_filter
@@ -77,7 +64,6 @@ heat.filter_factory = heat.api.cfn:version_negotiation_filter
[filter:cwversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
-heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter
[filter:context]
paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory
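With the cloudwatch pipelines and the apicwapp app removed above, only the remaining named pipelines in this file stay loadable. A sketch of how a pipeline is resolved from api-paste.ini with PasteDeploy (assuming the usual heat-api pipeline name):

from paste.deploy import loadapp

# Load the native API pipeline by name from the paste config above.
app = loadapp('config:/etc/heat/api-paste.ini', name='heat-api')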
diff --git a/etc/heat/heat-policy-generator.conf b/etc/heat/heat-policy-generator.conf
index 6d11632ac..bd05e3469 100644
--- a/etc/heat/heat-policy-generator.conf
+++ b/etc/heat/heat-policy-generator.conf
@@ -1,4 +1,3 @@
[DEFAULT]
-format = json
namespace = heat
-output_file = etc/heat/policy.json.sample
+output_file = etc/heat/policy.yaml.sample
diff --git a/etc/heat/policy.json b/etc/heat/policy.json
deleted file mode 100644
index c3a3bd58f..000000000
--- a/etc/heat/policy.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
- "cloudformation:ListStacks": "rule:deny_stack_user",
- "cloudformation:CreateStack": "rule:deny_stack_user",
- "cloudformation:DescribeStacks": "rule:deny_stack_user",
- "cloudformation:DeleteStack": "rule:deny_stack_user",
- "cloudformation:UpdateStack": "rule:deny_stack_user",
- "cloudformation:CancelUpdateStack": "rule:deny_stack_user",
- "cloudformation:DescribeStackEvents": "rule:deny_stack_user",
- "cloudformation:ValidateTemplate": "rule:deny_stack_user",
- "cloudformation:GetTemplate": "rule:deny_stack_user",
- "cloudformation:EstimateTemplateCost": "rule:deny_stack_user",
- "cloudformation:DescribeStackResource": "",
- "cloudformation:DescribeStackResources": "rule:deny_stack_user",
- "cloudformation:ListStackResources": "rule:deny_stack_user",
-
- "cloudwatch:DeleteAlarms": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarms": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user",
- "cloudwatch:DisableAlarmActions": "rule:deny_stack_user",
- "cloudwatch:EnableAlarmActions": "rule:deny_stack_user",
- "cloudwatch:GetMetricStatistics": "rule:deny_stack_user",
- "cloudwatch:ListMetrics": "rule:deny_stack_user",
- "cloudwatch:PutMetricAlarm": "rule:deny_stack_user",
- "cloudwatch:PutMetricData": "",
- "cloudwatch:SetAlarmState": "rule:deny_stack_user",
-
- "actions:action": "rule:deny_stack_user",
- "build_info:build_info": "rule:deny_stack_user",
- "events:index": "rule:deny_stack_user",
- "events:show": "rule:deny_stack_user",
- "resource:index": "rule:deny_stack_user",
- "resource:metadata": "",
- "resource:signal": "",
- "resource:mark_unhealthy": "rule:deny_stack_user",
- "resource:show": "rule:deny_stack_user",
- "stacks:abandon": "rule:deny_stack_user",
- "stacks:create": "rule:deny_stack_user",
- "stacks:delete": "rule:deny_stack_user",
- "stacks:detail": "rule:deny_stack_user",
- "stacks:export": "rule:deny_stack_user",
- "stacks:generate_template": "rule:deny_stack_user",
- "stacks:global_index": "rule:deny_everybody",
- "stacks:index": "rule:deny_stack_user",
- "stacks:list_resource_types": "rule:deny_stack_user",
- "stacks:list_template_versions": "rule:deny_stack_user",
- "stacks:list_template_functions": "rule:deny_stack_user",
- "stacks:lookup": "",
- "stacks:preview": "rule:deny_stack_user",
- "stacks:resource_schema": "rule:deny_stack_user",
- "stacks:show": "rule:deny_stack_user",
- "stacks:template": "rule:deny_stack_user",
- "stacks:environment": "rule:deny_stack_user",
- "stacks:files": "rule:deny_stack_user",
- "stacks:update": "rule:deny_stack_user",
- "stacks:update_patch": "rule:deny_stack_user",
- "stacks:preview_update": "rule:deny_stack_user",
- "stacks:preview_update_patch": "rule:deny_stack_user",
- "stacks:validate_template": "rule:deny_stack_user",
- "stacks:snapshot": "rule:deny_stack_user",
- "stacks:show_snapshot": "rule:deny_stack_user",
- "stacks:delete_snapshot": "rule:deny_stack_user",
- "stacks:list_snapshots": "rule:deny_stack_user",
- "stacks:restore_snapshot": "rule:deny_stack_user",
- "stacks:list_outputs": "rule:deny_stack_user",
- "stacks:show_output": "rule:deny_stack_user",
-
- "software_configs:global_index": "rule:deny_everybody",
- "software_configs:index": "rule:deny_stack_user",
- "software_configs:create": "rule:deny_stack_user",
- "software_configs:show": "rule:deny_stack_user",
- "software_configs:delete": "rule:deny_stack_user",
- "software_deployments:index": "rule:deny_stack_user",
- "software_deployments:create": "rule:deny_stack_user",
- "software_deployments:show": "rule:deny_stack_user",
- "software_deployments:update": "rule:deny_stack_user",
- "software_deployments:delete": "rule:deny_stack_user",
- "software_deployments:metadata": "",
-
- "service:index": "rule:context_is_admin",
-
- "resource_types:OS::Nova::Flavor": "rule:project_admin",
- "resource_types:OS::Cinder::EncryptedVolumeType": "rule:project_admin",
- "resource_types:OS::Cinder::VolumeType": "rule:project_admin",
- "resource_types:OS::Cinder::Quota": "rule:project_admin",
- "resource_types:OS::Neutron::Quota": "rule:project_admin",
- "resource_types:OS::Nova::Quota": "rule:project_admin",
- "resource_types:OS::Manila::ShareType": "rule:project_admin",
- "resource_types:OS::Neutron::ProviderNet": "rule:project_admin",
- "resource_types:OS::Neutron::QoSPolicy": "rule:project_admin",
- "resource_types:OS::Neutron::QoSBandwidthLimitRule": "rule:project_admin",
- "resource_types:OS::Neutron::Segment": "rule:project_admin",
- "resource_types:OS::Nova::HostAggregate": "rule:project_admin",
- "resource_types:OS::Cinder::QoSSpecs": "rule:project_admin",
- "resource_types:OS::Cinder::QoSAssociation": "rule:project_admin",
- "resource_types:OS::Keystone::*": "rule:project_admin"
-}
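The defaults deleted from etc/heat/policy.json above are not dropped; they move to policy-in-code and are registered with oslo.policy when the enforcer starts (see the register_defaults() call in heat/common/policy.py below). A minimal sketch of the registration pattern, using two rule names from the file above (the module layout is an assumption, not shown by this diff):

    from oslo_policy import policy

    deny_stack_user = 'rule:deny_stack_user'

    rules = [
        # Same defaults as the deleted JSON entries.
        policy.RuleDefault(name='stacks:create',
                           check_str=deny_stack_user),
        policy.RuleDefault(name='stacks:lookup',
                           check_str=''),
    ]

    def list_rules():
        return rules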
diff --git a/heat/api/aws/exception.py b/heat/api/aws/exception.py
index b5f592d35..d92c24478 100644
--- a/heat/api/aws/exception.py
+++ b/heat/api/aws/exception.py
@@ -298,7 +298,6 @@ def map_remote_error(ex):
'ResourceActionNotSupported',
'ResourceNotFound',
'ResourceNotAvailable',
- 'WatchRuleNotFound',
'StackValidationFailed',
'InvalidSchemaError',
'InvalidTemplateReference',
diff --git a/heat/api/cfn/v1/stacks.py b/heat/api/cfn/v1/stacks.py
index 8ed72ccc7..245c8d57d 100644
--- a/heat/api/cfn/v1/stacks.py
+++ b/heat/api/cfn/v1/stacks.py
@@ -49,9 +49,9 @@ class StackController(object):
raise exception.HeatInvalidActionError()
def _enforce(self, req, action):
- """Authorize an action against the policy.json."""
+ """Authorize an action against the policy.json and policies in code."""
try:
- self.policy.enforce(req.context, action)
+ self.policy.enforce(req.context, action, is_registered_policy=True)
except heat_exception.Forbidden:
msg = _('Action %s not allowed for user') % action
raise exception.HeatAccessDeniedError(msg)
diff --git a/heat/api/cloudwatch/__init__.py b/heat/api/cloudwatch/__init__.py
deleted file mode 100644
index 69f0744c6..000000000
--- a/heat/api/cloudwatch/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import routes
-import webob
-
-from heat.api.cloudwatch import watch
-from heat.api.middleware import version_negotiation as vn
-from heat.api import versions
-from heat.common import wsgi
-
-
-class API(wsgi.Router):
-
- """WSGI router for Heat CloudWatch API."""
-
- _actions = {
- 'delete_alarms': 'DeleteAlarms',
- 'describe_alarm_history': 'DescribeAlarmHistory',
- 'describe_alarms': 'DescribeAlarms',
- 'describe_alarms_for_metric': 'DescribeAlarmsForMetric',
- 'disable_alarm_actions': 'DisableAlarmActions',
- 'enable_alarm_actions': 'EnableAlarmActions',
- 'get_metric_statistics': 'GetMetricStatistics',
- 'list_metrics': 'ListMetrics',
- 'put_metric_alarm': 'PutMetricAlarm',
- 'put_metric_data': 'PutMetricData',
- 'set_alarm_state': 'SetAlarmState',
- }
-
- def __init__(self, conf, **local_conf):
- self.conf = conf
- mapper = routes.Mapper()
- controller_resource = watch.create_resource(conf)
-
- def conditions(action):
- api_action = self._actions[action]
-
- def action_match(environ, result):
- req = webob.Request(environ)
- env_action = req.params.get("Action")
- return env_action == api_action
-
- return {'function': action_match}
-
- for action in self._actions:
- mapper.connect("/", controller=controller_resource, action=action,
- conditions=conditions(action))
-
- mapper.connect("/", controller=controller_resource, action="index")
-
- super(API, self).__init__(mapper)
-
-
-def version_negotiation_filter(app, conf, **local_conf):
- return vn.VersionNegotiationFilter(versions.Controller, app,
- conf, **local_conf)
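The deleted router mapped every CloudWatch call onto "/" and dispatched on the AWS-style Action query parameter rather than the URL path. The condition function it attached to each route did no more than the following (standalone webob illustration, not Heat code):

    import webob

    req = webob.Request.blank('/?Action=DescribeAlarms')
    # Each route matched only when this value equalled its API action name.
    assert req.params.get('Action') == 'DescribeAlarms'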
diff --git a/heat/api/cloudwatch/watch.py b/heat/api/cloudwatch/watch.py
deleted file mode 100644
index 368b07d30..000000000
--- a/heat/api/cloudwatch/watch.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Endpoint for heat AWS-compatible CloudWatch API."""
-
-from oslo_log import log as logging
-import oslo_messaging as messaging
-import six
-
-from heat.api.aws import exception
-from heat.api.aws import utils as api_utils
-from heat.common import exception as heat_exception
-from heat.common.i18n import _
-from heat.common import policy
-from heat.common import wsgi
-from heat.rpc import api as rpc_api
-from heat.rpc import client as rpc_client
-
-LOG = logging.getLogger(__name__)
-
-
-class WatchController(object):
-
- """WSGI controller for CloudWatch resource in heat API.
-
- Implements the API actions.
- """
-
- def __init__(self, options):
- self.options = options
- self.rpc_client = rpc_client.EngineClient()
- self.policy = policy.Enforcer(scope='cloudwatch')
-
- def _enforce(self, req, action):
- """Authorize an action against the policy.json."""
- try:
- self.policy.enforce(req.context, action)
- except heat_exception.Forbidden:
- msg = _("Action %s not allowed for user") % action
- raise exception.HeatAccessDeniedError(msg)
- except Exception:
- # We expect policy.enforce to either pass or raise Forbidden
- # however, if anything else happens, we want to raise
- # HeatInternalFailureError, failure to do this results in
- # the user getting a big stacktrace spew as an API response
- msg = _("Error authorizing action %s") % action
- raise exception.HeatInternalFailureError(msg)
-
- @staticmethod
- def _reformat_dimensions(dims):
- """Reformat dimensions list into AWS API format.
-
- :param dims: a list of dicts.
- """
- newdims = []
- for count, d in enumerate(dims, 1):
- for key, value in d.items():
- newdims.append({'Name': key, 'Value': value})
- return newdims
-
- def delete_alarms(self, req):
- """Implements DeleteAlarms API action."""
- self._enforce(req, 'DeleteAlarms')
- return exception.HeatAPINotImplementedError()
-
- def describe_alarm_history(self, req):
- """Implements DescribeAlarmHistory API action."""
- self._enforce(req, 'DescribeAlarmHistory')
- return exception.HeatAPINotImplementedError()
-
- def describe_alarms(self, req):
- """Implements DescribeAlarms API action."""
- self._enforce(req, 'DescribeAlarms')
-
- def format_metric_alarm(a):
- """Reformat engine output into the AWS "MetricAlarm" format."""
- keymap = {
- rpc_api.WATCH_ACTIONS_ENABLED: 'ActionsEnabled',
- rpc_api.WATCH_ALARM_ACTIONS: 'AlarmActions',
- rpc_api.WATCH_TOPIC: 'AlarmArn',
- rpc_api.WATCH_UPDATED_TIME:
- 'AlarmConfigurationUpdatedTimestamp',
- rpc_api.WATCH_DESCRIPTION: 'AlarmDescription',
- rpc_api.WATCH_NAME: 'AlarmName',
- rpc_api.WATCH_COMPARISON: 'ComparisonOperator',
- rpc_api.WATCH_DIMENSIONS: 'Dimensions',
- rpc_api.WATCH_PERIODS: 'EvaluationPeriods',
- rpc_api.WATCH_INSUFFICIENT_ACTIONS:
- 'InsufficientDataActions',
- rpc_api.WATCH_METRIC_NAME: 'MetricName',
- rpc_api.WATCH_NAMESPACE: 'Namespace',
- rpc_api.WATCH_OK_ACTIONS: 'OKActions',
- rpc_api.WATCH_PERIOD: 'Period',
- rpc_api.WATCH_STATE_REASON: 'StateReason',
- rpc_api.WATCH_STATE_REASON_DATA: 'StateReasonData',
- rpc_api.WATCH_STATE_UPDATED_TIME: 'StateUpdatedTimestamp',
- rpc_api.WATCH_STATE_VALUE: 'StateValue',
- rpc_api.WATCH_STATISTIC: 'Statistic',
- rpc_api.WATCH_THRESHOLD: 'Threshold',
- rpc_api.WATCH_UNIT: 'Unit',
- }
-
- # AWS doesn't return StackId in the main MetricAlarm
- # structure, so we add StackId as a dimension to all responses
- a[rpc_api.WATCH_DIMENSIONS].append({'StackId':
- a[rpc_api.WATCH_STACK_ID]})
-
- # Reformat dimensions list into AWS API format
- a[rpc_api.WATCH_DIMENSIONS] = self._reformat_dimensions(
- a[rpc_api.WATCH_DIMENSIONS])
-
- return api_utils.reformat_dict_keys(keymap, a)
-
- con = req.context
- parms = dict(req.params)
- try:
- name = parms['AlarmName']
- except KeyError:
- name = None
-
- try:
- watch_list = self.rpc_client.show_watch(con, watch_name=name)
- except messaging.RemoteError as ex:
- return exception.map_remote_error(ex)
-
- res = {'MetricAlarms': [format_metric_alarm(a)
- for a in watch_list]}
-
- result = api_utils.format_response("DescribeAlarms", res)
- return result
-
- def describe_alarms_for_metric(self, req):
- """Implements DescribeAlarmsForMetric API action."""
- self._enforce(req, 'DescribeAlarmsForMetric')
- return exception.HeatAPINotImplementedError()
-
- def disable_alarm_actions(self, req):
- """Implements DisableAlarmActions API action."""
- self._enforce(req, 'DisableAlarmActions')
- return exception.HeatAPINotImplementedError()
-
- def enable_alarm_actions(self, req):
- """Implements EnableAlarmActions API action."""
- self._enforce(req, 'EnableAlarmActions')
- return exception.HeatAPINotImplementedError()
-
- def get_metric_statistics(self, req):
- """Implements GetMetricStatistics API action."""
- self._enforce(req, 'GetMetricStatistics')
- return exception.HeatAPINotImplementedError()
-
- def list_metrics(self, req):
- """Implements ListMetrics API action.
-
- Lists metric datapoints associated with a particular alarm,
- or all alarms if none specified.
- """
- self._enforce(req, 'ListMetrics')
-
- def format_metric_data(d, fil=None):
- """Reformat engine output into the AWS "Metric" format.
-
- Takes an optional filter dict, which is traversed
- so a metric dict is only returned if all keys match
- the filter dict.
- """
- fil = fil or {}
- dimensions = [
- {'AlarmName': d[rpc_api.WATCH_DATA_ALARM]},
- {'Timestamp': d[rpc_api.WATCH_DATA_TIME]}
- ]
- for key in d[rpc_api.WATCH_DATA]:
- dimensions.append({key: d[rpc_api.WATCH_DATA][key]})
-
- newdims = self._reformat_dimensions(dimensions)
-
- result = {
- 'MetricName': d[rpc_api.WATCH_DATA_METRIC],
- 'Dimensions': newdims,
- 'Namespace': d[rpc_api.WATCH_DATA_NAMESPACE],
- }
-
- for f in fil:
- try:
- value = result[f]
- if value != fil[f]:
- # Filter criteria not met, return None
- return
- except KeyError:
- LOG.warning("Invalid filter key %s, ignoring", f)
-
- return result
-
- con = req.context
- parms = dict(req.params)
- # FIXME : Don't yet handle filtering by Dimensions
- filter_result = dict((k, v) for (k, v) in six.iteritems(parms) if k in
- ("MetricName", "Namespace"))
- LOG.debug("filter parameters : %s" % filter_result)
-
- try:
- # Engine does not currently support query by namespace/metric
- # so we pass None/None and do any filtering locally
- null_kwargs = {'metric_namespace': None,
- 'metric_name': None}
- watch_data = self.rpc_client.show_watch_metric(con,
- **null_kwargs)
- except messaging.RemoteError as ex:
- return exception.map_remote_error(ex)
-
- res = {'Metrics': []}
- for d in watch_data:
- metric = format_metric_data(d, filter_result)
- if metric:
- res['Metrics'].append(metric)
-
- result = api_utils.format_response("ListMetrics", res)
- return result
-
- def put_metric_alarm(self, req):
- """Implements PutMetricAlarm API action."""
- self._enforce(req, 'PutMetricAlarm')
- return exception.HeatAPINotImplementedError()
-
- def put_metric_data(self, req):
- """Implements PutMetricData API action."""
- self._enforce(req, 'PutMetricData')
-
- con = req.context
- parms = dict(req.params)
- namespace = api_utils.get_param_value(parms, 'Namespace')
-
- # Extract data from the request so we can pass it to the engine
- # We have to do this in two passes, because the AWS
- # query format nests the dimensions within the MetricData
- # query-parameter-list (see AWS PutMetricData docs)
- # extract_param_list gives a list-of-dict, which we then
- # need to process (each dict) for dimensions
- metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
- if not len(metric_data):
- LOG.error("Request does not contain required MetricData")
- return exception.HeatMissingParameterError(_("MetricData list"))
-
- watch_name = None
- dimensions = []
- for p in metric_data:
- dimension = api_utils.extract_param_pairs(p,
- prefix='Dimensions',
- keyname='Name',
- valuename='Value')
- if 'AlarmName' in dimension:
- watch_name = dimension['AlarmName']
- else:
- dimensions.append(dimension)
-
- # Extract the required data from the metric_data
- # and format dict to pass to engine
- data = {'Namespace': namespace,
- api_utils.get_param_value(metric_data[0], 'MetricName'): {
- 'Unit': api_utils.get_param_value(metric_data[0], 'Unit'),
- 'Value': api_utils.get_param_value(metric_data[0],
- 'Value'),
- 'Dimensions': dimensions}}
-
- try:
- self.rpc_client.create_watch_data(con, watch_name, data)
- except messaging.RemoteError as ex:
- return exception.map_remote_error(ex)
-
- result = {'ResponseMetadata': None}
- return api_utils.format_response("PutMetricData", result)
-
- def set_alarm_state(self, req):
- """Implements SetAlarmState API action."""
- self._enforce(req, 'SetAlarmState')
-
- # Map from AWS state names to those used in the engine
- state_map = {'OK': rpc_api.WATCH_STATE_OK,
- 'ALARM': rpc_api.WATCH_STATE_ALARM,
- 'INSUFFICIENT_DATA': rpc_api.WATCH_STATE_NODATA}
-
- con = req.context
- parms = dict(req.params)
-
- # Get mandatory parameters
- name = api_utils.get_param_value(parms, 'AlarmName')
- state = api_utils.get_param_value(parms, 'StateValue')
-
- if state not in state_map:
- msg = _('Invalid state %(state)s, '
- 'expecting one of %(expect)s') % {
- 'state': state,
- 'expect': list(state_map.keys())}
- LOG.error(msg)
- return exception.HeatInvalidParameterValueError(msg)
-
- LOG.debug("setting %(name)s to %(state)s" % {
- 'name': name, 'state': state_map[state]})
- try:
- self.rpc_client.set_watch_state(con, watch_name=name,
- state=state_map[state])
- except messaging.RemoteError as ex:
- return exception.map_remote_error(ex)
-
- return api_utils.format_response("SetAlarmState", "")
-
-
-def create_resource(options):
- """Watch resource factory method."""
- deserializer = wsgi.JSONRequestDeserializer()
- return wsgi.Resource(WatchController(options), deserializer)
diff --git a/heat/api/middleware/ssl.py b/heat/api/middleware/ssl.py
deleted file mode 100644
index d4e7564ed..000000000
--- a/heat/api/middleware/ssl.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from debtcollector import removals
-from oslo_config import cfg
-from oslo_middleware import ssl
-
-from heat.common.i18n import _
-
-ssl_middleware_opts = [
- cfg.StrOpt('secure_proxy_ssl_header',
- default='X-Forwarded-Proto',
- deprecated_group='DEFAULT',
- help=_("The HTTP Header that will be used to determine what "
- "the original request protocol scheme was, even if "
- "it was removed by an SSL terminator proxy."))
-]
-
-
-removals.removed_module(__name__,
- "oslo_middleware.http_proxy_to_wsgi")
-
-
-class SSLMiddleware(ssl.SSLMiddleware):
-
- def __init__(self, application, *args, **kwargs):
- # NOTE(cbrandily): calling super(ssl.SSLMiddleware, self).__init__
- # allows to define our opt (including a deprecation).
- super(ssl.SSLMiddleware, self).__init__(application, *args, **kwargs)
- self.oslo_conf.register_opts(
- ssl_middleware_opts, group='oslo_middleware')
-
-
-def list_opts():
- yield None, ssl_middleware_opts
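The deprecation shim above is gone along with its sslmiddleware_filter factory (removed from heat/api/openstack/__init__.py below); the removal message has pointed at oslo_middleware.http_proxy_to_wsgi since 6.0.0. A rough sketch of the replacement, which derives the original request scheme from the standard Forwarded/X-Forwarded-* headers set by an SSL-terminating proxy (sample_app is a placeholder):

    from oslo_middleware import http_proxy_to_wsgi

    def sample_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok']

    # Wrap any WSGI application; the middleware fixes up wsgi.url_scheme
    # and related environ keys before the request reaches the app.
    app = http_proxy_to_wsgi.HTTPProxyToWSGI(sample_app)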
diff --git a/heat/api/openstack/__init__.py b/heat/api/openstack/__init__.py
index c7891313d..6713e94bb 100644
--- a/heat/api/openstack/__init__.py
+++ b/heat/api/openstack/__init__.py
@@ -11,9 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from debtcollector import removals
from heat.api.middleware import fault
-from heat.api.middleware import ssl
from heat.api.middleware import version_negotiation as vn
from heat.api.openstack import versions
@@ -25,9 +23,3 @@ def version_negotiation_filter(app, conf, **local_conf):
def faultwrap_filter(app, conf, **local_conf):
return fault.FaultWrapper(app)
-
-
-@removals.remove(message='Use oslo_middleware.http_proxy_to_wsgi instead.',
- version='6.0.0', removal_version='8.0.0')
-def sslmiddleware_filter(app, conf, **local_conf):
- return ssl.SSLMiddleware(app)
diff --git a/heat/api/openstack/v1/actions.py b/heat/api/openstack/v1/actions.py
index fcc4a5236..2b058e72c 100644
--- a/heat/api/openstack/v1/actions.py
+++ b/heat/api/openstack/v1/actions.py
@@ -26,7 +26,8 @@ class ActionController(object):
Implements the API for stack actions
"""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'actions'
ACTIONS = (
@@ -41,7 +42,7 @@ class ActionController(object):
self.options = options
self.rpc_client = rpc_client.EngineClient()
- @util.identified_stack
+ @util.registered_identified_stack
def action(self, req, identity, body=None):
"""Performs a specified action on a stack.
diff --git a/heat/api/openstack/v1/build_info.py b/heat/api/openstack/v1/build_info.py
index 5172a13a8..2743f621b 100644
--- a/heat/api/openstack/v1/build_info.py
+++ b/heat/api/openstack/v1/build_info.py
@@ -24,14 +24,15 @@ class BuildInfoController(object):
Returns build information for current app.
"""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'build_info'
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
- @util.policy_enforce
+ @util.registered_policy_enforce
def build_info(self, req):
engine_revision = self.rpc_client.get_revision(req.context)
build_info = {
diff --git a/heat/api/openstack/v1/events.py b/heat/api/openstack/v1/events.py
index 9cfc14f3f..465d0ab45 100644
--- a/heat/api/openstack/v1/events.py
+++ b/heat/api/openstack/v1/events.py
@@ -84,7 +84,8 @@ class EventController(object):
Implements the API actions.
"""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'events'
def __init__(self, options):
@@ -106,7 +107,7 @@ class EventController(object):
return [format_event(req, e, keys) for e in events]
- @util.identified_stack
+ @util.registered_identified_stack
def index(self, req, identity, resource_name=None):
"""Lists summary information for all events."""
whitelist = {
@@ -149,7 +150,7 @@ class EventController(object):
return {'events': events}
- @util.identified_stack
+ @util.registered_identified_stack
def show(self, req, identity, resource_name, event_id):
"""Gets detailed information for an event."""
diff --git a/heat/api/openstack/v1/resources.py b/heat/api/openstack/v1/resources.py
index ab056b0e5..dc01e5378 100644
--- a/heat/api/openstack/v1/resources.py
+++ b/heat/api/openstack/v1/resources.py
@@ -75,7 +75,8 @@ class ResourceController(object):
Implements the API actions.
"""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'resource'
def __init__(self, options):
@@ -92,7 +93,7 @@ class ResourceController(object):
else:
return default
- @util.identified_stack
+ @util.registered_identified_stack
def index(self, req, identity):
"""Lists information for all resources."""
@@ -131,7 +132,7 @@ class ResourceController(object):
return {'resources': [format_resource(req, res) for res in res_list]}
- @util.identified_stack
+ @util.registered_identified_stack
def show(self, req, identity, resource_name):
"""Gets detailed information for a resource."""
@@ -146,7 +147,7 @@ class ResourceController(object):
return {'resource': format_resource(req, res)}
- @util.identified_stack
+ @util.registered_identified_stack
def metadata(self, req, identity, resource_name):
"""Gets metadata information for a resource."""
@@ -156,14 +157,14 @@ class ResourceController(object):
return {rpc_api.RES_METADATA: res[rpc_api.RES_METADATA]}
- @util.identified_stack
+ @util.registered_identified_stack
def signal(self, req, identity, resource_name, body=None):
self.rpc_client.resource_signal(req.context,
stack_identity=identity,
resource_name=resource_name,
details=body)
- @util.identified_stack
+ @util.registered_identified_stack
def mark_unhealthy(self, req, identity, resource_name, body):
"""Mark a resource as healthy or unhealthy."""
data = dict()
diff --git a/heat/api/openstack/v1/services.py b/heat/api/openstack/v1/services.py
index d311a3972..c51167b7c 100644
--- a/heat/api/openstack/v1/services.py
+++ b/heat/api/openstack/v1/services.py
@@ -25,14 +25,15 @@ from heat.rpc import client as rpc_client
class ServiceController(object):
"""WSGI controller for reporting the heat engine status in Heat v1 API."""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'service'
def __init__(self, options):
self.options = options
self.rpc_client = rpc_client.EngineClient()
- @util.policy_enforce
+ @util.registered_policy_enforce
def index(self, req):
try:
services = self.rpc_client.list_services(req.context)
diff --git a/heat/api/openstack/v1/software_configs.py b/heat/api/openstack/v1/software_configs.py
index fc31f16c0..9724f0248 100644
--- a/heat/api/openstack/v1/software_configs.py
+++ b/heat/api/openstack/v1/software_configs.py
@@ -59,11 +59,11 @@ class SoftwareConfigController(object):
**params)
return {'software_configs': scs}
- @util.policy_enforce
+ @util.registered_policy_enforce
def global_index(self, req):
return self._index(req, use_admin_cnxt=True)
- @util.policy_enforce
+ @util.registered_policy_enforce
def index(self, req):
"""Lists summary information for all software configs."""
global_tenant = False
@@ -78,14 +78,14 @@ class SoftwareConfigController(object):
return self._index(req)
- @util.policy_enforce
+ @util.registered_policy_enforce
def show(self, req, config_id):
"""Gets detailed information for a software config."""
sc = self.rpc_client.show_software_config(
req.context, config_id)
return {'software_config': sc}
- @util.policy_enforce
+ @util.registered_policy_enforce
def create(self, req, body):
"""Create a new software config."""
create_data = {
@@ -100,7 +100,7 @@ class SoftwareConfigController(object):
req.context, **create_data)
return {'software_config': sc}
- @util.policy_enforce
+ @util.registered_policy_enforce
def delete(self, req, config_id):
"""Delete an existing software config."""
res = self.rpc_client.delete_software_config(req.context, config_id)
diff --git a/heat/api/openstack/v1/software_deployments.py b/heat/api/openstack/v1/software_deployments.py
index 369383bc4..99aca5785 100644
--- a/heat/api/openstack/v1/software_deployments.py
+++ b/heat/api/openstack/v1/software_deployments.py
@@ -34,7 +34,7 @@ class SoftwareDeploymentController(object):
def default(self, req, **args):
raise exc.HTTPNotFound()
- @util.policy_enforce
+ @util.registered_policy_enforce
def index(self, req):
"""List software deployments."""
whitelist = {
@@ -44,7 +44,7 @@ class SoftwareDeploymentController(object):
sds = self.rpc_client.list_software_deployments(req.context, **params)
return {'software_deployments': sds}
- @util.policy_enforce
+ @util.registered_policy_enforce
def metadata(self, req, server_id):
"""List software deployments grouped by the group name.
@@ -54,14 +54,14 @@ class SoftwareDeploymentController(object):
req.context, server_id=server_id)
return {'metadata': sds}
- @util.policy_enforce
+ @util.registered_policy_enforce
def show(self, req, deployment_id):
"""Gets detailed information for a software deployment."""
sd = self.rpc_client.show_software_deployment(req.context,
deployment_id)
return {'software_deployment': sd}
- @util.policy_enforce
+ @util.registered_policy_enforce
def create(self, req, body):
"""Create a new software deployment."""
create_data = dict((k, body.get(k)) for k in (
@@ -72,7 +72,7 @@ class SoftwareDeploymentController(object):
**create_data)
return {'software_deployment': sd}
- @util.policy_enforce
+ @util.registered_policy_enforce
def update(self, req, deployment_id, body):
"""Update an existing software deployment."""
update_data = dict((k, body.get(k)) for k in (
@@ -84,7 +84,7 @@ class SoftwareDeploymentController(object):
**update_data)
return {'software_deployment': sd}
- @util.policy_enforce
+ @util.registered_policy_enforce
def delete(self, req, deployment_id):
"""Delete an existing software deployment."""
res = self.rpc_client.delete_software_deployment(req.context,
diff --git a/heat/api/openstack/v1/stacks.py b/heat/api/openstack/v1/stacks.py
index 11520ac09..cbbbded38 100644
--- a/heat/api/openstack/v1/stacks.py
+++ b/heat/api/openstack/v1/stacks.py
@@ -168,7 +168,8 @@ class StackController(object):
Implements the API actions.
"""
- # Define request scope (must match what is in policy.json)
+ # Define request scope (must match what is in policy.json or policies in
+ # code)
REQUEST_SCOPE = 'stacks'
def __init__(self, options):
@@ -329,11 +330,11 @@ class StackController(object):
count=count,
include_project=cnxt.is_admin)
- @util.policy_enforce
+ @util.registered_policy_enforce
def global_index(self, req):
return self._index(req, use_admin_cnxt=True)
- @util.policy_enforce
+ @util.registered_policy_enforce
def index(self, req):
"""Lists summary information for all stacks."""
global_tenant = False
@@ -348,14 +349,14 @@ class StackController(object):
return self._index(req)
- @util.policy_enforce
+ @util.registered_policy_enforce
def detail(self, req):
"""Lists detailed information for all stacks."""
stacks = self.rpc_client.list_stacks(req.context)
return {'stacks': [stacks_view.format_stack(req, s) for s in stacks]}
- @util.policy_enforce
+ @util.registered_policy_enforce
def preview(self, req, body):
"""Preview the outcome of a template and its params."""
@@ -389,7 +390,7 @@ class StackController(object):
raise exc.HTTPBadRequest(six.text_type(msg))
return args
- @util.policy_enforce
+ @util.registered_policy_enforce
def create(self, req, body):
"""Create a new stack."""
data = InstantiationData(body)
@@ -410,7 +411,7 @@ class StackController(object):
)
return {'stack': formatted_stack}
- @util.policy_enforce
+ @util.registered_policy_enforce
def lookup(self, req, stack_name, path='', body=None):
"""Redirect to the canonical URL for a stack."""
try:
@@ -429,7 +430,7 @@ class StackController(object):
raise exc.HTTPFound(location=location)
- @util.identified_stack
+ @util.registered_identified_stack
def show(self, req, identity):
"""Gets detailed information for a stack."""
params = req.params
@@ -450,7 +451,7 @@ class StackController(object):
return {'stack': stacks_view.format_stack(req, stack)}
- @util.identified_stack
+ @util.registered_identified_stack
def template(self, req, identity):
"""Get the template body for an existing stack."""
@@ -460,19 +461,19 @@ class StackController(object):
# TODO(zaneb): always set Content-type to application/json
return templ
- @util.identified_stack
+ @util.registered_identified_stack
def environment(self, req, identity):
"""Get the environment for an existing stack."""
env = self.rpc_client.get_environment(req.context, identity)
return env
- @util.identified_stack
+ @util.registered_identified_stack
def files(self, req, identity):
"""Get the files for an existing stack."""
return self.rpc_client.get_files(req.context, identity)
- @util.identified_stack
+ @util.registered_identified_stack
def update(self, req, identity, body):
"""Update an existing stack with a new template and/or parameters."""
data = InstantiationData(body)
@@ -489,7 +490,7 @@ class StackController(object):
raise exc.HTTPAccepted()
- @util.identified_stack
+ @util.registered_identified_stack
def update_patch(self, req, identity, body):
"""Update an existing stack with a new template.
@@ -518,7 +519,7 @@ class StackController(object):
if p_name in params:
return self._extract_bool_param(p_name, params[p_name])
- @util.identified_stack
+ @util.registered_identified_stack
def preview_update(self, req, identity, body):
"""Preview update for existing stack with a new template/parameters."""
data = InstantiationData(body)
@@ -538,7 +539,7 @@ class StackController(object):
return {'resource_changes': changes}
- @util.identified_stack
+ @util.registered_identified_stack
def preview_update_patch(self, req, identity, body):
"""Preview PATCH update for existing stack."""
data = InstantiationData(body, patch=True)
@@ -558,7 +559,7 @@ class StackController(object):
return {'resource_changes': changes}
- @util.identified_stack
+ @util.registered_identified_stack
def delete(self, req, identity):
"""Delete the specified stack."""
@@ -567,7 +568,7 @@ class StackController(object):
cast=False)
raise exc.HTTPNoContent()
- @util.identified_stack
+ @util.registered_identified_stack
def abandon(self, req, identity):
"""Abandons specified stack.
@@ -577,7 +578,7 @@ class StackController(object):
return self.rpc_client.abandon_stack(req.context,
identity)
- @util.identified_stack
+ @util.registered_identified_stack
def export(self, req, identity):
"""Export specified stack.
@@ -585,7 +586,7 @@ class StackController(object):
"""
return self.rpc_client.export_stack(req.context, identity)
- @util.policy_enforce
+ @util.registered_policy_enforce
def validate_template(self, req, body):
"""Implements the ValidateTemplate API action.
@@ -623,7 +624,7 @@ class StackController(object):
return result
- @util.policy_enforce
+ @util.registered_policy_enforce
def list_resource_types(self, req):
"""Returns a resource types list which may be used in template."""
support_status = req.params.get('support_status')
@@ -646,7 +647,7 @@ class StackController(object):
heat_version=version,
with_description=with_description)}
- @util.policy_enforce
+ @util.registered_policy_enforce
def list_template_versions(self, req):
"""Returns a list of available template versions."""
return {
@@ -654,7 +655,7 @@ class StackController(object):
self.rpc_client.list_template_versions(req.context)
}
- @util.policy_enforce
+ @util.registered_policy_enforce
def list_template_functions(self, req, template_version):
"""Returns a list of available functions in a given template."""
if req.params.get('with_condition_func') is not None:
@@ -671,14 +672,14 @@ class StackController(object):
with_condition)
}
- @util.policy_enforce
+ @util.registered_policy_enforce
def resource_schema(self, req, type_name, with_description=False):
"""Returns the schema of the given resource type."""
return self.rpc_client.resource_schema(
req.context, type_name,
self._extract_bool_param('with_description', with_description))
- @util.policy_enforce
+ @util.registered_policy_enforce
def generate_template(self, req, type_name):
"""Generates a template based on the specified type."""
template_type = 'cfn'
@@ -694,42 +695,42 @@ class StackController(object):
type_name,
template_type)
- @util.identified_stack
+ @util.registered_identified_stack
def snapshot(self, req, identity, body):
name = body.get('name')
return self.rpc_client.stack_snapshot(req.context, identity, name)
- @util.identified_stack
+ @util.registered_identified_stack
def show_snapshot(self, req, identity, snapshot_id):
snapshot = self.rpc_client.show_snapshot(
req.context, identity, snapshot_id)
return {'snapshot': snapshot}
- @util.identified_stack
+ @util.registered_identified_stack
def delete_snapshot(self, req, identity, snapshot_id):
self.rpc_client.delete_snapshot(req.context, identity, snapshot_id)
raise exc.HTTPNoContent()
- @util.identified_stack
+ @util.registered_identified_stack
def list_snapshots(self, req, identity):
return {
'snapshots': self.rpc_client.stack_list_snapshots(
req.context, identity)
}
- @util.identified_stack
+ @util.registered_identified_stack
def restore_snapshot(self, req, identity, snapshot_id):
self.rpc_client.stack_restore(req.context, identity, snapshot_id)
raise exc.HTTPAccepted()
- @util.identified_stack
+ @util.registered_identified_stack
def list_outputs(self, req, identity):
return {
'outputs': self.rpc_client.list_outputs(
req.context, identity)
}
- @util.identified_stack
+ @util.registered_identified_stack
def show_output(self, req, identity, output_key):
return {'output': self.rpc_client.show_output(req.context,
identity,
diff --git a/heat/cmd/all.py b/heat/cmd/all.py
index d5ea33f4e..423ef853b 100644
--- a/heat/cmd/all.py
+++ b/heat/cmd/all.py
@@ -25,7 +25,6 @@ import sys
from heat.cmd import api
from heat.cmd import api_cfn
-from heat.cmd import api_cloudwatch
from heat.cmd import engine
from heat.common import config
from heat.common import messaging
@@ -45,7 +44,6 @@ LAUNCH_SERVICES = {
'engine': [engine.launch_engine, {'setup_logging': False}],
'api': [api.launch_api, API_LAUNCH_OPTS],
'api_cfn': [api_cfn.launch_cfn_api, API_LAUNCH_OPTS],
- 'api_cloudwatch': [api_cloudwatch.launch_cloudwatch_api, API_LAUNCH_OPTS],
}
services_opt = cfg.ListOpt(
@@ -53,7 +51,7 @@ services_opt = cfg.ListOpt(
default=['engine', 'api', 'api_cfn'],
help='Specifies the heat services that are enabled when running heat-all. '
'Valid options are all or any combination of '
- 'api, engine, api_cfn, or api_cloudwatch.'
+ 'api, engine, or api_cfn.'
)
cfg.CONF.register_opt(services_opt, group='heat_all')
diff --git a/heat/cmd/api_cloudwatch.py b/heat/cmd/api_cloudwatch.py
deleted file mode 100644
index f20eaeb4f..000000000
--- a/heat/cmd/api_cloudwatch.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Heat API Server.
-
-This implements an approximation of the Amazon CloudWatch API and translates it
-into a native representation. It then calls the heat-engine via AMQP RPC to
-implement them.
-"""
-
-import eventlet
-eventlet.monkey_patch(os=False)
-
-import sys
-
-from oslo_config import cfg
-import oslo_i18n as i18n
-from oslo_log import log as logging
-from oslo_reports import guru_meditation_report as gmr
-from oslo_service import systemd
-import six
-
-from heat.common import config
-from heat.common import messaging
-from heat.common import profiler
-from heat.common import wsgi
-from heat import version
-
-i18n.enable_lazy()
-
-LOG = logging.getLogger('heat.api.cloudwatch')
-
-
-def launch_cloudwatch_api(setup_logging=True):
- if setup_logging:
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat',
- prog='heat-api-cloudwatch',
- version=version.version_info.version_string())
- if setup_logging:
- logging.setup(cfg.CONF, 'heat-api-cloudwatch')
- logging.set_defaults()
- config.set_config_defaults()
- messaging.setup()
-
- app = config.load_paste_app()
-
- port = cfg.CONF.heat_api_cloudwatch.bind_port
- host = cfg.CONF.heat_api_cloudwatch.bind_host
- LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
- {'host': host, 'port': port})
- profiler.setup('heat-api-cloudwatch', host)
- gmr.TextGuruMeditation.setup_autorun(version)
- server = wsgi.Server('heat-api-cloudwatch',
- cfg.CONF.heat_api_cloudwatch)
- server.start(app, default_port=port)
- return server
-
-
-def main():
- try:
- server = launch_cloudwatch_api()
- systemd.notify_once()
- server.wait()
- except RuntimeError as e:
- msg = six.text_type(e)
- sys.exit("ERROR: %s" % msg)
diff --git a/heat/cmd/engine.py b/heat/cmd/engine.py
index 19e8f7d9b..81860fcf9 100644
--- a/heat/cmd/engine.py
+++ b/heat/cmd/engine.py
@@ -74,10 +74,6 @@ def launch_engine(setup_logging=True):
launcher = service.launch(cfg.CONF, srv, workers=workers,
restart_method='mutate')
- if cfg.CONF.enable_cloud_watch_lite:
- # We create the periodic tasks here, which mean they are created
- # only in the parent process when num_engine_workers>1 is specified
- srv.create_periodic_tasks()
return launcher
diff --git a/heat/common/config.py b/heat/common/config.py
index fbd4bf07f..0f9b84555 100644
--- a/heat/common/config.py
+++ b/heat/common/config.py
@@ -47,6 +47,9 @@ service_opts = [
help=_('URL of the Heat waitcondition server.')),
cfg.StrOpt('heat_watch_server_url',
default="",
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch Service has been removed.',
+ deprecated_since='10.0.0',
help=_('URL of the Heat CloudWatch server.')),
cfg.StrOpt('instance_connection_is_secure',
default="0",
@@ -179,6 +182,9 @@ engine_opts = [
' for stack locking.')),
cfg.BoolOpt('enable_cloud_watch_lite',
default=False,
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch Service has been removed.',
+ deprecated_since='10.0.0',
help=_('Enable the legacy OS::Heat::CWLiteAlarm resource.')),
cfg.BoolOpt('enable_stack_abandon',
default=False,
@@ -281,6 +287,7 @@ engine_opts = [
rpc_opts = [
cfg.StrOpt('host',
default=socket.gethostname(),
+ sample_default='<Hostname>',
help=_('Name of the engine node. '
'This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, '
@@ -416,8 +423,8 @@ def list_opts():
for client in ('aodh', 'barbican', 'ceilometer', 'cinder', 'designate',
'glance', 'heat', 'keystone', 'magnum', 'manila', 'mistral',
- 'monasca', 'neutron', 'nova', 'sahara', 'senlin', 'swift',
- 'trove', 'zaqar'
+ 'monasca', 'neutron', 'nova', 'octavia', 'sahara', 'senlin',
+ 'swift', 'trove', 'zaqar'
):
client_specific_group = 'clients_' + client
yield client_specific_group, clients_opts
diff --git a/heat/common/exception.py b/heat/common/exception.py
index f42c9e07f..cf6acb5ce 100644
--- a/heat/common/exception.py
+++ b/heat/common/exception.py
@@ -304,11 +304,6 @@ class ClientNotAvailable(HeatException):
msg_fmt = _("The client (%(client_name)s) is not available.")
-class WatchRuleNotFound(EntityNotFound):
- """Keep this for AWS compatibility."""
- msg_fmt = _("The Watch Rule (%(watch_name)s) could not be found.")
-
-
class ResourceFailure(HeatExceptionWithPath):
def __init__(self, exception_or_error, resource, action=None):
self.resource = resource
diff --git a/heat/common/grouputils.py b/heat/common/grouputils.py
index 2d02d56e3..7abde18fe 100644
--- a/heat/common/grouputils.py
+++ b/heat/common/grouputils.py
@@ -15,20 +15,102 @@ import six
from heat.common import exception
from heat.common.i18n import _
+from heat.engine import status
+from heat.engine import template
+from heat.rpc import api as rpc_api
+
+
+class GroupInspector(object):
+ """A class for returning data about a scaling group.
+
+ All data is fetched over RPC, and the group's stack is never loaded into
+ memory locally. Data is cached so it will be fetched only once. To
+ refresh the data, create a new GroupInspector.
+ """
+
+ def __init__(self, context, rpc_client, group_identity):
+ """Initialise with a context, rpc_client, and stack identifier."""
+ self._context = context
+ self._rpc_client = rpc_client
+ self._identity = group_identity
+ self._member_data = None
+ self._template_data = None
+
+ @classmethod
+ def from_parent_resource(cls, parent_resource):
+ """Create a GroupInspector from a parent resource.
+
+ This is a convenience method to instantiate a GroupInspector from a
+ Heat StackResource object.
+ """
+ return cls(parent_resource.context, parent_resource.rpc_client(),
+ parent_resource.nested_identifier())
+
+ def _get_member_data(self):
+ if self._identity is None:
+ return []
+
+ if self._member_data is None:
+ rsrcs = self._rpc_client.list_stack_resources(self._context,
+ dict(self._identity))
+
+ def sort_key(r):
+ return (r[rpc_api.RES_STATUS] != status.ResourceStatus.FAILED,
+ r[rpc_api.RES_CREATION_TIME],
+ r[rpc_api.RES_NAME])
+
+ self._member_data = sorted(rsrcs, key=sort_key)
+
+ return self._member_data
+
+ def _members(self, include_failed):
+ return (r for r in self._get_member_data()
+ if (include_failed or
+ r[rpc_api.RES_STATUS] != status.ResourceStatus.FAILED))
+
+ def size(self, include_failed):
+ """Return the size of the group.
+
+ If include_failed is False, only members not in a FAILED state will
+ be counted.
+ """
+ return sum(1 for m in self._members(include_failed))
+
+ def member_names(self, include_failed):
+ """Return an iterator over the names of the group members
+
+ If include_failed is False, only members not in a FAILED state will
+ be included.
+ """
+ return (m[rpc_api.RES_NAME] for m in self._members(include_failed))
+
+ def _get_template_data(self):
+ if self._identity is None:
+ return None
+
+ if self._template_data is None:
+ self._template_data = self._rpc_client.get_template(self._context,
+ self._identity)
+ return self._template_data
+
+ def template(self):
+ """Return a Template object representing the group's current template.
+
+ Note that this does *not* include any environment data.
+ """
+ data = self._get_template_data()
+ if data is None:
+ return None
+ return template.Template(data)
def get_size(group, include_failed=False):
"""Get number of member resources managed by the specified group.
- The size exclude failed members default, set include_failed=True
- to get total size.
+ The size excludes failed members by default; set include_failed=True
+ to get the total size.
"""
- if group.nested():
- resources = [r for r in six.itervalues(group.nested())
- if include_failed or r.status != r.FAILED]
- return len(resources)
- else:
- return 0
+ return GroupInspector.from_parent_resource(group).size(include_failed)
def get_members(group, include_failed=False):
@@ -67,7 +149,8 @@ def get_member_names(group):
Failed resources will be ignored.
"""
- return [r.name for r in get_members(group)]
+ inspector = GroupInspector.from_parent_resource(group)
+ return list(inspector.member_names(include_failed=False))
def get_resource(stack, resource_name, use_indices, key=None):
@@ -109,9 +192,15 @@ def get_nested_attrs(stack, key, use_indices, *path):
def get_member_definitions(group, include_failed=False):
"""Get member definitions in (name, ResourceDefinition) pair for group.
- The List is sorted first by created_time then by name.
- If include_failed is set, failed members will be put first in the
- List sorted by created_time then by name.
+ The List is sorted first by created_time then by name.
+ If include_failed is set, failed members will be put first in the
+ List sorted by created_time then by name.
"""
- return [(resource.name, resource.t)
- for resource in get_members(group, include_failed)]
+ inspector = GroupInspector.from_parent_resource(group)
+ template = inspector.template()
+ if template is None:
+ return []
+ definitions = template.resource_definitions(None)
+ return [(name, definitions[name])
+ for name in inspector.member_names(include_failed=include_failed)
+ if name in definitions]
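A short usage sketch of the new GroupInspector (group_resource is any StackResource-like object; the variable names are invented):

    inspector = GroupInspector.from_parent_resource(group_resource)

    healthy_count = inspector.size(include_failed=False)
    all_names = list(inspector.member_names(include_failed=True))
    group_template = inspector.template()  # Template object, or None

    # Every value above is fetched over RPC and cached on the instance;
    # create a new GroupInspector to see refreshed data.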
diff --git a/heat/common/policy.py b/heat/common/policy.py
index 767d1b540..821a1f9e4 100644
--- a/heat/common/policy.py
+++ b/heat/common/policy.py
@@ -47,6 +47,7 @@ class Enforcer(object):
self.default_rule = default_rule
self.enforcer = policy.Enforcer(
CONF, default_rule=default_rule, policy_file=policy_file)
+ self.log_not_registered = True
# register rules
self.enforcer.register_defaults(policies.list_rules())
@@ -78,8 +79,11 @@ class Enforcer(object):
do_raise=do_raise,
exc=exc, action=rule)
except policy.PolicyNotRegistered:
- with excutils.save_and_reraise_exception():
- LOG.exception(_('Policy not registered.'))
+ if self.log_not_registered:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_('Policy not registered.'))
+ else:
+ raise
else:
return self.enforcer.enforce(rule, target, credentials,
do_raise, exc=exc, *args, **kwargs)
@@ -108,7 +112,8 @@ class Enforcer(object):
:param context: Heat request context
:returns: A non-False value if the user is admin according to policy
"""
- return self._check(context, 'context_is_admin', target={}, exc=None)
+ return self._check(context, 'context_is_admin', target={}, exc=None,
+ is_registered_policy=True)
def get_enforcer():
@@ -123,13 +128,17 @@ class ResourceEnforcer(Enforcer):
**kwargs):
super(ResourceEnforcer, self).__init__(
default_rule=default_rule, **kwargs)
+ self.log_not_registered = False
- def _enforce(self, context, res_type, scope=None, target=None):
+ def _enforce(self, context, res_type, scope=None, target=None,
+ is_registered_policy=False):
try:
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
- target=target)
+ target=target, is_registered_policy=is_registered_policy)
+ except policy.PolicyNotRegistered:
+ result = True
except self.exc as ex:
LOG.info(six.text_type(ex))
raise
@@ -138,19 +147,27 @@ class ResourceEnforcer(Enforcer):
raise self.exc(action=res_type)
return result
- def enforce(self, context, res_type, scope=None, target=None):
+ def enforce(self, context, res_type, scope=None, target=None,
+ is_registered_policy=False):
# NOTE(pas-ha): try/except just to log the exception
- result = self._enforce(context, res_type, scope, target)
+ result = self._enforce(context, res_type, scope, target,
+ is_registered_policy=is_registered_policy)
if result:
# check for wildcard resource types
subparts = res_type.split("::")[:-1]
subparts.append('*')
res_type_wc = "::".join(subparts)
- return self._enforce(context, res_type_wc, scope, target)
+ try:
+ return self._enforce(context, res_type_wc, scope, target,
+ is_registered_policy=is_registered_policy)
+ except self.exc:
+ raise self.exc(action=res_type)
return result
- def enforce_stack(self, stack, scope=None, target=None):
+ def enforce_stack(self, stack, scope=None, target=None,
+ is_registered_policy=False):
for res in stack.resources.values():
- self.enforce(stack.context, res.type(), scope=scope, target=target)
+ self.enforce(stack.context, res.type(), scope=scope, target=target,
+ is_registered_policy=is_registered_policy)
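The wildcard fallback in ResourceEnforcer.enforce derives a second rule name from the concrete type, which is what lets a single entry such as resource_types:OS::Keystone::* (formerly in policy.json, above) cover every resource type under that prefix:

    res_type = 'OS::Keystone::Role'
    subparts = res_type.split('::')[:-1]
    subparts.append('*')
    res_type_wc = '::'.join(subparts)
    assert res_type_wc == 'OS::Keystone::*'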
diff --git a/heat/common/timeutils.py b/heat/common/timeutils.py
index 570bbf39b..fd0376e3c 100644
--- a/heat/common/timeutils.py
+++ b/heat/common/timeutils.py
@@ -20,7 +20,7 @@ import time
from heat.common.i18n import _
-iso_duration_re = re.compile('PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')
+iso_duration_re = re.compile(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')
wallclock = time.time
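The change above only turns the pattern into a raw string, silencing Python's deprecation warning for the \d escape sequences in a normal string literal; the accepted ISO 8601 duration syntax is unchanged:

    import re

    iso_duration_re = re.compile(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')

    match = iso_duration_re.match('PT1H30M')
    assert match.groups() == ('1', '30', None)  # hours, minutes, seconds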
diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py
index c2a81f85d..7bba27abb 100644
--- a/heat/common/wsgi.py
+++ b/heat/common/wsgi.py
@@ -136,34 +136,58 @@ api_cw_opts = [
cfg.IPOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been removed.',
+ deprecated_since='10.0.0'),
cfg.PortOpt('bind_port', default=8003,
help=_('The port on which the server will listen.'),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been removed.',
+ deprecated_since='10.0.0'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been removed.',
+ deprecated_since='10.0.0'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been Removed.',
+ deprecated_since='10.0.0'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been Removed.',
+ deprecated_since='10.0.0'),
cfg.IntOpt('workers', min=0, default=1,
help=_("Number of workers for Heat service."),
- deprecated_group='DEFAULT'),
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been Removed.',
+ deprecated_since='10.0.0'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
- 'Keystone v3 API with big service catalogs.)')),
+ 'Keystone v3 API with big service catalogs.)'),
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been Removed.',
+ deprecated_since='10.0.0'),
cfg.IntOpt('tcp_keepidle', default=600,
help=_('The value for the socket option TCP_KEEPIDLE. This is '
'the time in seconds that the connection must be idle '
- 'before TCP starts sending keepalive probes.')),
+ 'before TCP starts sending keepalive probes.'),
+ deprecated_for_removal=True,
+ deprecated_reason='Heat CloudWatch API has been Removed.',
+ deprecated_since='10.0.0')
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index dfdbcde00..805873e3e 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -178,9 +178,9 @@ def raw_template_files_get(context, files_id):
def resource_get(context, resource_id, refresh=False, refresh_data=False,
eager=True):
query = context.session.query(models.Resource)
+ query = query.options(orm.joinedload("data"))
if eager:
- query = query.options(orm.joinedload("data")).options(
- orm.joinedload("rsrc_prop_data"))
+ query = query.options(orm.joinedload("rsrc_prop_data"))
result = query.get(resource_id)
if not result:
@@ -254,6 +254,8 @@ def _add_atomic_key_to_values(values, atomic_key):
values['atomic_key'] = atomic_key + 1
+@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
+ retry_interval=0.5, inc_retry_interval=True)
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
session = context.session
@@ -495,12 +497,19 @@ def resource_get_all_active_by_stack(context, stack_id):
return dict((res.id, res) for res in results)
-def resource_get_all_by_root_stack(context, stack_id, filters=None):
+def resource_get_all_by_root_stack(context, stack_id, filters=None,
+ stack_id_only=False):
query = context.session.query(
models.Resource
).filter_by(
root_stack_id=stack_id
- ).options(orm.joinedload("data"))
+ )
+
+ if stack_id_only:
+ query = query.options(orm.load_only("id", "stack_id"))
+ else:
+ query = query.options(orm.joinedload("data")).options(
+ orm.joinedload("rsrc_prop_data"))
query = db_filters.exact_filter(query, models.Resource, filters)
results = query.all()
@@ -802,8 +811,13 @@ def stack_delete(context, stack_id):
delete_softly(context, s)
+def _is_duplicate_error(exc):
+ return isinstance(exc, db_exception.DBDuplicateEntry)
+
+
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+ retry_interval=0.5, inc_retry_interval=True,
+ exception_checker=_is_duplicate_error)
def stack_lock_create(context, stack_id, engine_id):
with db_context.writer.independent.using(context) as session:
lock = session.query(models.StackLock).get(stack_id)
@@ -1060,78 +1074,6 @@ def event_create(context, values):
return event_ref
-def watch_rule_get(context, watch_rule_id):
- result = context.session.query(models.WatchRule).get(watch_rule_id)
- return result
-
-
-def watch_rule_get_by_name(context, watch_rule_name):
- result = context.session.query(
- models.WatchRule).filter_by(name=watch_rule_name).first()
- return result
-
-
-def watch_rule_get_all(context):
- results = context.session.query(models.WatchRule).all()
- return results
-
-
-def watch_rule_get_all_by_stack(context, stack_id):
- results = context.session.query(
- models.WatchRule).filter_by(stack_id=stack_id).all()
- return results
-
-
-def watch_rule_create(context, values):
- obj_ref = models.WatchRule()
- obj_ref.update(values)
- obj_ref.save(context.session)
- return obj_ref
-
-
-def watch_rule_update(context, watch_id, values):
- wr = watch_rule_get(context, watch_id)
-
- if not wr:
- raise exception.NotFound(_('Attempt to update a watch with id: '
- '%(id)s %(msg)s') % {
- 'id': watch_id,
- 'msg': 'that does not exist'})
- wr.update(values)
- wr.save(context.session)
-
-
-def watch_rule_delete(context, watch_id):
- wr = watch_rule_get(context, watch_id)
- if not wr:
- raise exception.NotFound(_('Attempt to delete watch_rule: '
- '%(id)s %(msg)s') % {
- 'id': watch_id,
- 'msg': 'that does not exist'})
- with context.session.begin():
- for d in wr.watch_data:
- context.session.delete(d)
- context.session.delete(wr)
-
-
-def watch_data_create(context, values):
- obj_ref = models.WatchData()
- obj_ref.update(values)
- obj_ref.save(context.session)
- return obj_ref
-
-
-def watch_data_get_all(context):
- results = context.session.query(models.WatchData).all()
- return results
-
-
-def watch_data_get_all_by_watch_rule_id(context, watch_rule_id):
- results = context.session.query(models.WatchData).filter_by(
- watch_rule_id=watch_rule_id).all()
- return results
-
-
def software_config_create(context, values):
obj_ref = models.SoftwareConfig()
obj_ref.update(values)
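stack_lock_create above now retries on duplicate-entry errors as well as deadlocks: oslo.db's exception_checker extends the set of retryable exceptions, so two engines racing to insert the same lock row retry instead of failing outright. A minimal standalone sketch of the decorator combination (the function body is invented):

    from oslo_db import api as oslo_db_api
    from oslo_db import exception as db_exception

    def _is_duplicate_error(exc):
        return isinstance(exc, db_exception.DBDuplicateEntry)

    @oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
                               retry_interval=0.5, inc_retry_interval=True,
                               exception_checker=_is_duplicate_error)
    def insert_lock_row(session, values):
        # A DBDeadlock or DBDuplicateEntry raised here is retried up to
        # three times, with an increasing interval between attempts.
        ...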
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/086_drop_watch_rule_watch_data_tables.py b/heat/db/sqlalchemy/migrate_repo/versions/086_drop_watch_rule_watch_data_tables.py
new file mode 100644
index 000000000..a99ac5c4e
--- /dev/null
+++ b/heat/db/sqlalchemy/migrate_repo/versions/086_drop_watch_rule_watch_data_tables.py
@@ -0,0 +1,53 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import ForeignKeyConstraint
+from sqlalchemy.engine import reflection
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+
+def upgrade(engine):
+ meta = MetaData()
+ meta.bind = engine
+
+ def _get_columns(source_table, params):
+ columns = set()
+ for column in params:
+ columns.add(source_table.c[column])
+ return columns
+
+ def _remove_foreign_key_constraints(engine, meta, table_name):
+ inspector = reflection.Inspector.from_engine(engine)
+
+ for fk in inspector.get_foreign_keys(table_name):
+ source_table = Table(table_name, meta, autoload=True)
+ target_table = Table(fk['referred_table'], meta, autoload=True)
+
+ fkey = ForeignKeyConstraint(
+ columns=_get_columns(source_table, fk['constrained_columns']),
+ refcolumns=_get_columns(target_table, fk['referred_columns']),
+ name=fk['name'])
+ fkey.drop()
+
+ def _drop_table_and_indexes(meta, table_name):
+ table = Table(table_name, meta, autoload=True)
+ for index in table.indexes:
+ index.drop()
+ table.drop()
+
+ table_names = ('watch_data', 'watch_rule')
+
+ for table_name in table_names:
+ _remove_foreign_key_constraints(engine, meta, table_name)
+ _drop_table_and_indexes(meta, table_name)
diff --git a/heat/db/sqlalchemy/models.py b/heat/db/sqlalchemy/models.py
index 6a6c7a6c7..ca208bef0 100644
--- a/heat/db/sqlalchemy/models.py
+++ b/heat/db/sqlalchemy/models.py
@@ -16,7 +16,6 @@
import uuid
from oslo_db.sqlalchemy import models
-from oslo_utils import timeutils
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy.orm import backref
@@ -307,39 +306,6 @@ class Resource(BASE, HeatBase, StateAware):
sqlalchemy.ForeignKey('raw_template.id'))
-class WatchRule(BASE, HeatBase):
- """Represents a watch_rule created by the heat engine."""
-
- __tablename__ = 'watch_rule'
-
- id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
- name = sqlalchemy.Column('name', sqlalchemy.String(255))
- rule = sqlalchemy.Column('rule', types.Json)
- state = sqlalchemy.Column('state', sqlalchemy.String(255))
- last_evaluated = sqlalchemy.Column(sqlalchemy.DateTime,
- default=timeutils.utcnow)
-
- stack_id = sqlalchemy.Column(sqlalchemy.String(36),
- sqlalchemy.ForeignKey('stack.id'),
- nullable=False)
- stack = relationship(Stack, backref=backref('watch_rule'))
-
-
-class WatchData(BASE, HeatBase):
- """Represents a watch_data created by the heat engine."""
-
- __tablename__ = 'watch_data'
-
- id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
- data = sqlalchemy.Column('data', types.Json)
-
- watch_rule_id = sqlalchemy.Column(
- sqlalchemy.Integer,
- sqlalchemy.ForeignKey('watch_rule.id'),
- nullable=False)
- watch_rule = relationship(WatchRule, backref=backref('watch_data'))
-
-
class SoftwareConfig(BASE, HeatBase):
"""Represents a software configuration resource.
diff --git a/heat/engine/api.py b/heat/engine/api.py
index 8cf958ff5..2c5840683 100644
--- a/heat/engine/api.py
+++ b/heat/engine/api.py
@@ -233,7 +233,8 @@ def format_stack(stack, preview=False, resolve_outputs=True):
info.update(update_info)
# allow users to view the outputs of stacks
- if stack.action != stack.DELETE and resolve_outputs:
+ if (not (stack.action == stack.DELETE and stack.status == stack.COMPLETE)
+ and resolve_outputs):
info[rpc_api.STACK_OUTPUTS] = format_stack_outputs(stack.outputs,
resolve_value=True)
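
The old guard suppressed outputs for any stack whose action was DELETE; the new one suppresses them only for a successfully deleted stack, so DELETE_IN_PROGRESS and DELETE_FAILED stacks still resolve their outputs. A condensed restatement of the predicate (string literals stand in for the stack constants):

    def should_resolve_outputs(action, status, resolve_outputs=True):
        # Outputs are resolved unless the stack is DELETE_COMPLETE.
        return (not (action == 'DELETE' and status == 'COMPLETE')
                and resolve_outputs)

    assert should_resolve_outputs('DELETE', 'FAILED')        # newly included
    assert not should_resolve_outputs('DELETE', 'COMPLETE')  # still excluded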
diff --git a/heat/engine/cfn/functions.py b/heat/engine/cfn/functions.py
index d354d8dba..c0a138524 100644
--- a/heat/engine/cfn/functions.py
+++ b/heat/engine/cfn/functions.py
@@ -99,7 +99,7 @@ def Ref(stack, fn_name, args):
{ "Ref" : "<resource_name>" }
"""
- if args in stack:
+ if stack is None or args in stack:
RefClass = hot_funcs.GetResource
else:
RefClass = ParamRef
diff --git a/heat/engine/check_resource.py b/heat/engine/check_resource.py
index cd3c8a5c9..1cf7556b1 100644
--- a/heat/engine/check_resource.py
+++ b/heat/engine/check_resource.py
@@ -249,7 +249,8 @@ class CheckResource(object):
try:
input_forward_data = None
- for req_node in deps.required_by(graph_key):
+ for req_node in sorted(deps.required_by(graph_key),
+ key=lambda n: n.is_update):
input_data = _get_input_data(req_node, input_forward_data)
if req_node.is_update:
input_forward_data = input_data
@@ -304,6 +305,9 @@ class CheckResource(object):
if is_update:
if (rsrc.replaced_by is not None and
rsrc.current_template_id != tmpl.id):
+ LOG.debug('Resource %s with id %s already replaced by %s; '
+ 'not checking',
+ rsrc.name, resource_id, rsrc.replaced_by)
return
try:
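
Sorting on the is_update flag makes the traversal deterministic: cleanup nodes (is_update False) now sort ahead of update nodes instead of depending on graph iteration order. A minimal illustration, using a hypothetical namedtuple standing in for the convergence graph keys:

    import collections

    Node = collections.namedtuple('Node', ['rsrc_id', 'is_update'])

    nodes = [Node(5, True), Node(7, False), Node(5, False)]
    ordered = sorted(nodes, key=lambda n: n.is_update)
    assert [n.is_update for n in ordered] == [False, False, True]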
diff --git a/heat/engine/clients/client_exception.py b/heat/engine/clients/client_exception.py
index 19a58bf0a..b2529535d 100644
--- a/heat/engine/clients/client_exception.py
+++ b/heat/engine/clients/client_exception.py
@@ -25,3 +25,7 @@ class EntityMatchNotFound(exception.HeatException):
class EntityUniqueMatchNotFound(EntityMatchNotFound):
msg_fmt = _("No %(entity)s unique match found for %(args)s.")
+
+
+class InterfaceNotFound(exception.HeatException):
+ msg_fmt = _("No network interface found for server %(id)s.")
diff --git a/heat/engine/clients/os/barbican.py b/heat/engine/clients/os/barbican.py
index 846098499..76400f8c8 100644
--- a/heat/engine/clients/os/barbican.py
+++ b/heat/engine/clients/os/barbican.py
@@ -10,17 +10,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from barbicanclient import client as barbican_client
+
from barbicanclient import exceptions
+from barbicanclient.v1 import client as barbican_client
+from barbicanclient.v1 import containers
from heat.common import exception
from heat.engine.clients import client_plugin
from heat.engine import constraints
-try:
- from barbicanclient.v1 import containers
-except ImportError:
- from barbicanclient import containers
CLIENT_NAME = 'barbican'
diff --git a/heat/engine/clients/os/cinder.py b/heat/engine/clients/os/cinder.py
index 6e51c2b9f..0a3be5caf 100644
--- a/heat/engine/clients/os/cinder.py
+++ b/heat/engine/clients/os/cinder.py
@@ -154,7 +154,7 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
def check_attach_volume_complete(self, vol_id):
vol = self.client().volumes.get(vol_id)
- if vol.status in ('available', 'attaching'):
+ if vol.status in ('available', 'attaching', 'reserved'):
LOG.debug("Volume %(id)s is being attached - "
"volume status: %(status)s",
{'id': vol_id, 'status': vol.status})
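
With Cinder's newer attach workflow a volume can sit in 'reserved' while the attachment is being created, so that status now counts as attach-in-progress rather than an unexpected state. A simplified sketch of the check's contract (the real method inspects the volume object and raises a Heat-specific error):

    STILL_ATTACHING = ('available', 'attaching', 'reserved')

    def attach_complete(status):
        if status in STILL_ATTACHING:
            return False                  # keep polling
        if status != 'in-use':
            raise RuntimeError('unexpected volume status: %s' % status)
        return True

    assert attach_complete('in-use')
    assert not attach_complete('reserved')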
diff --git a/heat/engine/clients/os/heat_plugin.py b/heat/engine/clients/os/heat_plugin.py
index 3cb408d14..fb5132893 100644
--- a/heat/engine/clients/os/heat_plugin.py
+++ b/heat/engine/clients/os/heat_plugin.py
@@ -12,8 +12,6 @@
# under the License.
from oslo_config import cfg
-import six
-from six.moves import urllib
from heatclient import client as hc
from heatclient import exc
@@ -90,28 +88,5 @@ class HeatClientPlugin(client_plugin.ClientPlugin):
config_url += '/'
return config_url
- def get_watch_server_url(self):
- cfn_url = self.get_heat_cfn_url()
- parsed_url = urllib.parse.urlparse(cfn_url)
- host = parsed_url.hostname
- port = parsed_url.port
- # For ipv6 we need to include the host in brackets
- if parsed_url.netloc.startswith('['):
- host = "[%s]" % host
- # The old url model, like http://localhost:port/v1
- if port:
- watch_api_port = (
- six.text_type(cfg.CONF.heat_api_cloudwatch.bind_port))
- replaced_netloc = ':'.join([host, str(watch_api_port)])
- parsed_url = parsed_url._replace(netloc=replaced_netloc)
- # The uwsgi url mode, like http://ip/heat-api-cfn/v1
- else:
- paths = parsed_url.path.split('/')
- paths[1] = 'heat-api-cloudwatch'
- replaced_paths = '/'.join(paths)
- parsed_url = parsed_url._replace(path=replaced_paths)
-
- return urllib.parse.urlunparse(parsed_url)
-
def get_insecure_option(self):
return self._get_client_option(CLIENT_NAME, 'insecure')
diff --git a/heat/engine/clients/os/monasca.py b/heat/engine/clients/os/monasca.py
index d8b5dd6aa..3fb656d32 100644
--- a/heat/engine/clients/os/monasca.py
+++ b/heat/engine/clients/os/monasca.py
@@ -32,15 +32,10 @@ class MonascaClientPlugin(client_plugin.ClientPlugin):
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
endpoint = self.url_for(service_type=self.MONITORING,
endpoint_type=interface)
- # Change this to use session once it's supported by monascaclient
return client.Client(
self.VERSION,
- token=self.context.keystone_session.get_token(),
- endpoint=endpoint,
- cacert=self._get_client_option(CLIENT_NAME, 'ca_file'),
- cert_file=self._get_client_option(CLIENT_NAME, 'cert_file'),
- key_file=self._get_client_option(CLIENT_NAME, 'key_file'),
- insecure=self._get_client_option(CLIENT_NAME, 'insecure'))
+ session=self.context.keystone_session,
+ endpoint=endpoint)
def is_not_found(self, ex):
return isinstance(ex, monasca_exc.NotFound)
diff --git a/heat/engine/clients/os/nova.py b/heat/engine/clients/os/nova.py
index 52d10cb9b..80efb84b2 100644
--- a/heat/engine/clients/os/nova.py
+++ b/heat/engine/clients/os/nova.py
@@ -19,17 +19,20 @@ import os
import pkgutil
import string
+from neutronclient.common import exceptions as q_exceptions
from novaclient import client as nc
from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils import netutils
import six
from six.moves.urllib import parse as urlparse
import tenacity
from heat.common import exception
from heat.common.i18n import _
+from heat.engine.clients import client_exception
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
from heat.engine import constraints
@@ -100,7 +103,8 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
return client
def is_not_found(self, ex):
- return isinstance(ex, exceptions.NotFound)
+ return isinstance(ex, (exceptions.NotFound,
+ q_exceptions.NotFound))
def is_over_limit(self, ex):
return isinstance(ex, exceptions.OverLimit)
@@ -395,21 +399,13 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
if is_cfntools:
heat_client_plugin = self.context.clients.client_plugin('heat')
- watch_url = cfg.CONF.heat_watch_server_url
- if not watch_url:
- watch_url = heat_client_plugin.get_watch_server_url()
-
- attachments.append((watch_url,
- 'cfn-watch-server', 'x-cfninitdata'))
-
cfn_md_url = heat_client_plugin.get_cfn_metadata_server_url()
attachments.append((cfn_md_url,
'cfn-metadata-server', 'x-cfninitdata'))
# Create a boto config which the cfntools on the host use to know
- # where the cfn and cw API's are to be accessed
+ # where the cfn API is to be accessed
cfn_url = urlparse.urlparse(cfn_md_url)
- cw_url = urlparse.urlparse(watch_url)
is_secure = cfg.CONF.instance_connection_is_secure
vcerts = cfg.CONF.instance_connection_https_validate_certificates
boto_cfg = "\n".join(["[Boto]",
@@ -418,10 +414,7 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
"https_validate_certificates = %s" % vcerts,
"cfn_region_name = heat",
"cfn_region_endpoint = %s" %
- cfn_url.hostname,
- "cloudwatch_region_name = heat",
- "cloudwatch_region_endpoint = %s" %
- cw_url.hostname])
+ cfn_url.hostname])
attachments.append((boto_cfg,
'cfn-boto-cfg', 'x-cfninitdata'))
@@ -445,9 +438,12 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
return False
status = self.get_status(server)
- if status in ("DELETED", "SOFT_DELETED"):
+ if status == 'DELETED':
return True
- if status == 'ERROR':
+
+ if status == 'SOFT_DELETED':
+ self.client().servers.force_delete(server_id)
+ elif status == 'ERROR':
fault = getattr(server, 'fault', {})
message = fault.get('message', 'Unknown')
code = fault.get('code')
@@ -680,6 +676,54 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
{'att': attach_id, 'srv': server_id})
return False
+ def associate_floatingip(self, server_id, floatingip_id):
+ iface_list = self.fetch_server(server_id).interface_list()
+ if len(iface_list) == 0:
+ raise client_exception.InterfaceNotFound(id=server_id)
+ if len(iface_list) > 1:
+ LOG.warning("Multiple interfaces found for server %s, "
+ "using the first one.", server_id)
+
+ port_id = iface_list[0].port_id
+ fixed_ips = iface_list[0].fixed_ips
+ fixed_address = next(ip['ip_address'] for ip in fixed_ips
+ if netutils.is_valid_ipv4(ip['ip_address']))
+ request_body = {
+ 'floatingip': {
+ 'port_id': port_id,
+ 'fixed_ip_address': fixed_address}}
+
+ self.clients.client('neutron').update_floatingip(floatingip_id,
+ request_body)
+
+ def dissociate_floatingip(self, floatingip_id):
+ request_body = {
+ 'floatingip': {
+ 'port_id': None,
+ 'fixed_ip_address': None}}
+ self.clients.client('neutron').update_floatingip(floatingip_id,
+ request_body)
+
+ def associate_floatingip_address(self, server_id, fip_address):
+ fips = self.clients.client(
+ 'neutron').list_floatingips(
+ floating_ip_address=fip_address)['floatingips']
+ if len(fips) == 0:
+ args = {'ip_address': fip_address}
+ raise client_exception.EntityMatchNotFound(entity='floatingip',
+ args=args)
+ self.associate_floatingip(server_id, fips[0]['id'])
+
+ def dissociate_floatingip_address(self, fip_address):
+ fips = self.clients.client(
+ 'neutron').list_floatingips(
+ floating_ip_address=fip_address)['floatingips']
+ if len(fips) == 0:
+ args = {'ip_address': fip_address}
+ raise client_exception.EntityMatchNotFound(entity='floatingip',
+ args=args)
+ self.dissociate_floatingip(fips[0]['id'])
+
def interface_detach(self, server_id, port_id):
with self.ignore_not_found:
server = self.fetch_server(server_id)
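
The new helpers route floating-IP association through Neutron instead of the removed novaclient calls; associate_floatingip() picks the server's first interface and its first IPv4 fixed address. A hypothetical call site (the helper name and its use of a Heat resource are illustrative, not part of the patch):

    def attach_known_address(resource, server_id, address):
        """Associate an existing floating IP address with a server via the
        plugin API above; raises client_exception.EntityMatchNotFound if
        Neutron does not know the address."""
        plugin = resource.client_plugin('nova')
        plugin.associate_floatingip_address(server_id, address)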
diff --git a/heat/engine/clients/os/octavia.py b/heat/engine/clients/os/octavia.py
new file mode 100644
index 000000000..c865e01ca
--- /dev/null
+++ b/heat/engine/clients/os/octavia.py
@@ -0,0 +1,107 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from octaviaclient.api import constants
+from octaviaclient.api.v2 import octavia
+from osc_lib import exceptions
+
+from heat.engine.clients import client_plugin
+from heat.engine import constraints
+
+CLIENT_NAME = 'octavia'
+DEFAULT_FIND_ATTR = 'name'
+
+
+def _is_translated_exception(ex, code):
+ return (isinstance(ex, octavia.OctaviaClientException)
+ and ex.code == code)
+
+
+class OctaviaClientPlugin(client_plugin.ClientPlugin):
+
+ exceptions_module = octavia
+
+ service_types = [LOADBALANCER] = ['load-balancer']
+
+ supported_versions = [V2] = ['2']
+
+ default_version = V2
+
+ def _create(self, version=None):
+ interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
+ endpoint = self.url_for(service_type=self.LOADBALANCER,
+ endpoint_type=interface)
+ return octavia.OctaviaAPI(
+ session=self.context.keystone_session,
+ service_type=self.LOADBALANCER,
+ endpoint=endpoint)
+
+ def is_not_found(self, ex):
+ return isinstance(
+ ex, exceptions.NotFound) or _is_translated_exception(ex, 404)
+
+ def is_over_limit(self, ex):
+ return isinstance(
+ ex, exceptions.OverLimit) or _is_translated_exception(ex, 413)
+
+ def is_conflict(self, ex):
+ return isinstance(
+ ex, exceptions.Conflict) or _is_translated_exception(ex, 409)
+
+ def get_pool(self, value):
+ pool = self.client().find(path=constants.BASE_POOL_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return pool['id']
+
+ def get_listener(self, value):
+ lsnr = self.client().find(path=constants.BASE_LISTENER_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return lsnr['id']
+
+ def get_loadbalancer(self, value):
+ lb = self.client().find(path=constants.BASE_LOADBALANCER_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return lb['id']
+
+ def get_l7policy(self, value):
+ policy = self.client().find(path=constants.BASE_L7POLICY_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return policy['id']
+
+
+class OctaviaConstraint(constraints.BaseCustomConstraint):
+
+ expected_exceptions = (exceptions.NotFound,
+ octavia.OctaviaClientException)
+ base_url = None
+
+ def validate_with_client(self, client, value):
+ octavia_client = client.client(CLIENT_NAME)
+ octavia_client.find(path=self.base_url, value=value,
+ attr=DEFAULT_FIND_ATTR)
+
+
+class LoadbalancerConstraint(OctaviaConstraint):
+ base_url = constants.BASE_LOADBALANCER_URL
+
+
+class ListenerConstraint(OctaviaConstraint):
+ base_url = constants.BASE_LISTENER_URL
+
+
+class PoolConstraint(OctaviaConstraint):
+ base_url = constants.BASE_POOL_URL
+
+
+class L7PolicyConstraint(OctaviaConstraint):
+ base_url = constants.BASE_L7POLICY_URL
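
Each getter resolves a user-supplied name or ID to the canonical Octavia ID through the client's generic find(), and the constraint classes reuse the same lookup so templates referencing a missing object fail validation early. A hedged sketch of the lookup path (the helper name is illustrative):

    def resolve_pool(clients, name_or_id):
        """Return the pool ID for a name or ID; a miss surfaces through
        the plugin's translated not-found handling."""
        return clients.client_plugin('octavia').get_pool(name_or_id)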
diff --git a/heat/engine/clients/os/openstacksdk.py b/heat/engine/clients/os/openstacksdk.py
index af6f32b98..8be8612dc 100644
--- a/heat/engine/clients/os/openstacksdk.py
+++ b/heat/engine/clients/os/openstacksdk.py
@@ -11,12 +11,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+from openstack.config import cloud_region
from openstack import connection
from openstack import exceptions
-from openstack import profile
+import os_service_types
+from heat.common import config
from heat.engine.clients import client_plugin
from heat.engine import constraints
+import heat.version
CLIENT_NAME = 'openstack'
@@ -25,24 +28,39 @@ class OpenStackSDKPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
- service_types = [NETWORK] = ['network']
- service_client_map = {NETWORK: 'neutron'}
- api_version_map = {NETWORK: '2.0'}
+ service_types = [NETWORK, CLUSTERING] = ['network', 'clustering']
def _create(self, version=None):
- prof = profile.Profile()
- for svc_type in self.service_types:
- interface = self._get_client_option(
- self.service_client_map[svc_type], 'endpoint_type')
- prof.set_interface(svc_type, interface)
- prof.set_region(svc_type, self._get_region_name())
- prof.set_version(svc_type, self.api_version_map[svc_type])
-
- key_session = self.context.keystone_session
- return connection.Connection(authenticator=key_session.auth,
- verify=key_session.verify,
- cert=key_session.cert,
- profile=prof)
+ config = cloud_region.from_session(
+ # TODO(mordred) The way from_session calculates a cloud name
+ # doesn't interact well with the mocks in the test cases. The
+ # name is used in logging to distinguish requests made to different
+ # clouds. For now, set it to local - but maybe find a way to set
+ # it to something more meaningful later.
+ name='local',
+ session=self.context.keystone_session,
+ config=self._get_service_interfaces(),
+ region_name=self._get_region_name(),
+ app_name='heat',
+ app_version=heat.version.version_info.version_string())
+ return connection.Connection(config=config)
+
+ def _get_service_interfaces(self):
+ interfaces = {}
+ if not os_service_types:
+ return interfaces
+ types = os_service_types.ServiceTypes()
+ for name, _ in config.list_opts():
+ if not name or not name.startswith('clients_'):
+ continue
+            project_name = name.split("_", 1)[1]
+ service_data = types.get_service_data_for_project(project_name)
+ if not service_data:
+ continue
+ service_type = service_data['service_type']
+ interfaces[service_type + '_interface'] = self._get_client_option(
+ service_type, 'endpoint_type')
+ return interfaces
def is_not_found(self, ex):
return isinstance(ex, exceptions.ResourceNotFound)
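
_get_service_interfaces() walks the clients_<project> option groups, maps each project to its canonical service type, and emits the <service_type>_interface keys the SDK's CloudRegion expects. For a heat.conf carrying [clients_neutron] endpoint_type=internalURL, the helper would yield something like the mapping below (values assumed for illustration):

    interfaces = {
        'network_interface': 'internalURL',     # from [clients_neutron]
        'clustering_interface': 'publicURL',    # from [clients_senlin]
    }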
diff --git a/heat/engine/clients/os/senlin.py b/heat/engine/clients/os/senlin.py
index e77eab1b2..66cb7dccb 100644
--- a/heat/engine/clients/os/senlin.py
+++ b/heat/engine/clients/os/senlin.py
@@ -11,34 +11,23 @@
# License for the specific language governing permissions and limitations
# under the License.
+from openstack import exceptions
+
from heat.common import exception
from heat.common.i18n import _
-from heat.engine.clients import client_plugin
+from heat.engine.clients.os import openstacksdk as sdk_plugin
from heat.engine import constraints
-from openstack import profile
-from openstack import session
-from senlinclient import client
-from senlinclient.common import exc
-
CLIENT_NAME = 'senlin'
-class SenlinClientPlugin(client_plugin.ClientPlugin):
+class SenlinClientPlugin(sdk_plugin.OpenStackSDKPlugin):
- service_types = [CLUSTERING] = ['clustering']
- VERSION = '1'
+ exceptions_module = exceptions
- def _create(self):
- interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
- prof = profile.Profile()
- prof.set_interface(self.CLUSTERING, interface)
- prof.set_region(self.CLUSTERING, self._get_region_name())
- keystone_session = self.context.keystone_session
- s = session.Session(session=keystone_session,
- auth=keystone_session.auth,
- profile=prof)
- return client.Client(self.VERSION, session=s)
+ def _create(self, version=None):
+ client = super(SenlinClientPlugin, self)._create(version=version)
+ return client.clustering
def generate_spec(self, spec_type, spec_props):
spec = {'properties': spec_props}
@@ -68,12 +57,9 @@ class SenlinClientPlugin(client_plugin.ClientPlugin):
policy = self.client().get_policy(policy_name)
return policy.id
- def is_not_found(self, ex):
- return isinstance(ex, exc.sdkexc.ResourceNotFound)
-
def is_bad_request(self, ex):
- return (isinstance(ex, exc.sdkexc.HttpException) and
- ex.http_status == 400)
+ return (isinstance(ex, exceptions.HttpException) and
+ ex.status_code == 400)
def execute_actions(self, actions):
all_executed = True
@@ -97,24 +83,24 @@ class SenlinClientPlugin(client_plugin.ClientPlugin):
class ProfileConstraint(constraints.BaseCustomConstraint):
- # If name is not unique, will raise exc.sdkexc.HttpException
- expected_exceptions = (exc.sdkexc.HttpException,)
+ # If name is not unique, will raise exceptions.HttpException
+ expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, profile):
client.client(CLIENT_NAME).get_profile(profile)
class ClusterConstraint(constraints.BaseCustomConstraint):
- # If name is not unique, will raise exc.sdkexc.HttpException
- expected_exceptions = (exc.sdkexc.HttpException,)
+ # If name is not unique, will raise exceptions.HttpException
+ expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, value):
client.client(CLIENT_NAME).get_cluster(value)
class PolicyConstraint(constraints.BaseCustomConstraint):
- # If name is not unique, will raise exc.sdkexc.HttpException
- expected_exceptions = (exc.sdkexc.HttpException,)
+ # If name is not unique, will raise exceptions.HttpException
+ expected_exceptions = (exceptions.HttpException,)
def validate_with_client(self, client, value):
client.client(CLIENT_NAME).get_policy(value)
@@ -125,8 +111,8 @@ class ProfileTypeConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.StackValidationFailed,)
def validate_with_client(self, client, value):
- senlin_client = client.client(CLIENT_NAME)
- type_list = senlin_client.profile_types()
+ conn = client.client(CLIENT_NAME)
+ type_list = conn.profile_types()
names = [pt.name for pt in type_list]
if value not in names:
not_found_message = (
@@ -142,8 +128,8 @@ class PolicyTypeConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.StackValidationFailed,)
def validate_with_client(self, client, value):
- senlin_client = client.client(CLIENT_NAME)
- type_list = senlin_client.policy_types()
+ conn = client.client(CLIENT_NAME)
+ type_list = conn.policy_types()
names = [pt.name for pt in type_list]
if value not in names:
not_found_message = (
diff --git a/heat/engine/clients/os/zun.py b/heat/engine/clients/os/zun.py
index bb9287a69..af829275c 100644
--- a/heat/engine/clients/os/zun.py
+++ b/heat/engine/clients/os/zun.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from zunclient import client as zun_client
from zunclient import exceptions as zc_exc
-from zunclient.v1 import client as zun_client
from heat.engine.clients import client_plugin
@@ -23,7 +23,17 @@ class ZunClientPlugin(client_plugin.ClientPlugin):
service_types = [CONTAINER] = ['container']
- def _create(self):
+ default_version = '1.12'
+
+ supported_versions = [
+ V1_12
+ ] = [
+ '1.12'
+ ]
+
+ def _create(self, version=None):
+ if not version:
+ version = self.default_version
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
args = {
'interface': interface,
@@ -32,7 +42,7 @@ class ZunClientPlugin(client_plugin.ClientPlugin):
'region_name': self._get_region_name()
}
- client = zun_client.Client(**args)
+ client = zun_client.Client(version, **args)
return client
def is_not_found(self, ex):
diff --git a/heat/engine/constraint/heat_constraints.py b/heat/engine/constraint/heat_constraints.py
deleted file mode 100644
index 810bb5312..000000000
--- a/heat/engine/constraint/heat_constraints.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-
-import six
-
-from heat.common.i18n import _
-from heat.engine import constraints
-
-
-class ResourceTypeConstraint(constraints.BaseCustomConstraint):
-
- def validate(self, value, context, template=None):
-
- if not isinstance(value, collections.Sequence):
- return False
-
- if isinstance(value, six.string_types):
- value = [value]
-
- invalid_types = []
- for t in value:
- try:
- template.env.get_class(t)
- except Exception:
- invalid_types.append(t)
-
- if invalid_types:
- msg = _('The following resource types could not be found: %s')
- types = ','.join(invalid_types)
- self._error_message = msg % types
- return False
-
- return True
diff --git a/heat/engine/constraints.py b/heat/engine/constraints.py
index ca512d10f..d1da790e5 100644
--- a/heat/engine/constraints.py
+++ b/heat/engine/constraints.py
@@ -204,15 +204,14 @@ class Schema(collections.Mapping):
return value
- def validate_constraints(self, value, context=None, skipped=None,
- template=None):
+ def validate_constraints(self, value, context=None, skipped=None):
if not skipped:
skipped = []
try:
for constraint in self.constraints:
if type(constraint) not in skipped:
- constraint.validate(value, self, context, template)
+ constraint.validate(value, self, context)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
@@ -296,8 +295,8 @@ class Constraint(collections.Mapping):
return '\n'.join(desc())
- def validate(self, value, schema=None, context=None, template=None):
- if not self._is_valid(value, schema, context, template):
+ def validate(self, value, schema=None, context=None):
+ if not self._is_valid(value, schema, context):
if self.description:
err_msg = self.description
else:
@@ -374,7 +373,7 @@ class Range(Constraint):
self.min,
self.max)
- def _is_valid(self, value, schema, context, template):
+ def _is_valid(self, value, schema, context):
value = Schema.str_to_num(value)
if self.min is not None:
@@ -437,9 +436,8 @@ class Length(Range):
self.min,
self.max)
- def _is_valid(self, value, schema, context, template):
- return super(Length, self)._is_valid(len(value), schema, context,
- template)
+ def _is_valid(self, value, schema, context):
+ return super(Length, self)._is_valid(len(value), schema, context)
class Modulo(Constraint):
@@ -503,7 +501,7 @@ class Modulo(Constraint):
        return '%s is not a multiple of %s with an offset of %s' % (
value, self.step, self.offset)
- def _is_valid(self, value, schema, context, template):
+ def _is_valid(self, value, schema, context):
value = Schema.str_to_num(value)
if value % self.step != self.offset:
@@ -551,7 +549,7 @@ class AllowedValues(Constraint):
allowed = '[%s]' % ', '.join(str(a) for a in self.allowed)
return '"%s" is not an allowed value %s' % (value, allowed)
- def _is_valid(self, value, schema, context, template):
+ def _is_valid(self, value, schema, context):
# For list values, check if all elements of the list are contained
# in allowed list.
if isinstance(value, list):
@@ -594,7 +592,7 @@ class AllowedPattern(Constraint):
def _err_msg(self, value):
return '"%s" does not match pattern "%s"' % (value, self.pattern)
- def _is_valid(self, value, schema, context, template):
+ def _is_valid(self, value, schema, context):
match = self.match(value)
return match is not None and match.end() == len(value)
@@ -645,19 +643,11 @@ class CustomConstraint(Constraint):
return _('"%(value)s" does not validate %(name)s') % {
"value": value, "name": self.name}
- def _is_valid(self, value, schema, context, template):
+ def _is_valid(self, value, schema, context):
constraint = self.custom_constraint
if not constraint:
return False
-
- try:
- result = constraint.validate(value, context,
- template=template)
- except TypeError:
- # for backwards compatibility with older service constraints
- result = constraint.validate(value, context)
-
- return result
+ return constraint.validate(value, context)
class BaseCustomConstraint(object):
@@ -679,7 +669,7 @@ class BaseCustomConstraint(object):
return _("Error validating value '%(value)s': %(message)s") % {
"value": value, "message": self._error_message}
- def validate(self, value, context, template=None):
+ def validate(self, value, context):
@MEMOIZE
def check_cache_or_validate_value(cache_value_prefix,
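
With the template argument gone, custom constraints are back to the two-argument validate(value, context), and the TypeError fallback for old-style constraints disappears. A sketch of a plugin constraint under the new signature (the class and its check are illustrative, not part of the patch):

    from heat.engine import constraints

    class EvenNumberConstraint(constraints.BaseCustomConstraint):

        def validate(self, value, context):
            # Only the value and the request context are available now.
            try:
                return int(value) % 2 == 0
            except (TypeError, ValueError):
                return False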
diff --git a/heat/engine/environment.py b/heat/engine/environment.py
index b97875bd6..74bfe7f11 100644
--- a/heat/engine/environment.py
+++ b/heat/engine/environment.py
@@ -618,7 +618,7 @@ class ResourceRegistry(object):
if cnxt is None:
return True
try:
- enforcer.enforce(cnxt, name)
+ enforcer.enforce(cnxt, name, is_registered_policy=True)
except enforcer.exc:
return False
else:
diff --git a/heat/engine/function.py b/heat/engine/function.py
index 0dac9f86a..234a0cb6c 100644
--- a/heat/engine/function.py
+++ b/heat/engine/function.py
@@ -267,29 +267,30 @@ def resolve(snippet):
return snippet
-def validate(snippet, path=''):
+def validate(snippet, path=None):
+ if path is None:
+ path = []
+ elif isinstance(path, six.string_types):
+ path = [path]
+
if isinstance(snippet, Function):
try:
snippet.validate()
except AssertionError:
raise
except Exception as e:
- path = '.'.join([path, snippet.fn_name])
raise exception.StackValidationFailed(
- path=path, message=six.text_type(e))
+ path=path + [snippet.fn_name],
+ message=six.text_type(e))
elif isinstance(snippet, collections.Mapping):
- def mkpath(key):
- return '.'.join([path, key])
-
for k, v in six.iteritems(snippet):
- validate(v, mkpath(k))
+ validate(v, path + [k])
elif (not isinstance(snippet, six.string_types) and
isinstance(snippet, collections.Iterable)):
- def mkpath(indx):
- return '.'.join([path, '[%d]' % indx])
-
+ basepath = list(path)
+ parent = basepath.pop() if basepath else ''
for i, v in enumerate(snippet):
- validate(v, mkpath(i))
+ validate(v, basepath + ['%s[%d]' % (parent, i)])
def dependencies(snippet, path=''):
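
Building the path as a list defers the joining to the error formatter, and lets a list index attach to its parent key instead of producing a dangling '.[0]' segment. The iterable branch above reduces to:

    path = ['Resources', 'server', 'Properties', 'networks']
    basepath = list(path)
    parent = basepath.pop()            # 'networks'
    elem0 = basepath + ['%s[%d]' % (parent, 0)]
    assert '.'.join(elem0) == 'Resources.server.Properties.networks[0]'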
diff --git a/heat/engine/hot/functions.py b/heat/engine/hot/functions.py
index dd248f4c4..5668d5d3e 100644
--- a/heat/engine/hot/functions.py
+++ b/heat/engine/hot/functions.py
@@ -947,12 +947,16 @@ class Repeat(function.Function):
keys, lists = six.moves.zip(*for_each.items())
         # use an empty list for references (None), else validation will fail
- values = [[] if value is None else value for value in lists]
value_lens = []
- for arg in values:
- self._valid_arg(arg)
- value_lens.append(len(arg))
- if not self._nested_loop:
+ values = []
+ for value in lists:
+ if value is None:
+ values.append([])
+ else:
+ self._valid_arg(value)
+ values.append(value)
+ value_lens.append(len(value))
+ if not self._nested_loop and value_lens:
if len(set(value_lens)) != 1:
raise ValueError(_('For %s, the length of for_each values '
'should be equal if no nested '
diff --git a/heat/engine/output.py b/heat/engine/output.py
index 73c41e62f..1c12232ce 100644
--- a/heat/engine/output.py
+++ b/heat/engine/output.py
@@ -19,6 +19,15 @@ from heat.common import exception
from heat.engine import function
+# Field names that can be passed to Template.get_section_name() in order to
+# determine the appropriate name for a particular template format.
+FIELDS = (
+ VALUE, DESCRIPTION,
+) = (
+ 'Value', 'Description',
+)
+
+
class OutputDefinition(object):
"""A definition of a stack output, independent of any template format."""
@@ -30,9 +39,9 @@ class OutputDefinition(object):
self._deps = None
self._all_dep_attrs = None
- def validate(self, path=''):
+ def validate(self):
"""Validate the output value without resolving it."""
- function.validate(self._value, path)
+ function.validate(self._value, VALUE)
def required_resource_names(self):
if self._deps is None:
diff --git a/heat/engine/parameters.py b/heat/engine/parameters.py
index 2222c6034..168ac3e59 100644
--- a/heat/engine/parameters.py
+++ b/heat/engine/parameters.py
@@ -172,9 +172,8 @@ class Schema(constr.Schema):
'false')).lower() == 'true',
label=schema_dict.get(LABEL))
- def validate_value(self, value, context=None, template=None):
- super(Schema, self).validate_constraints(value, context=context,
- template=template)
+ def validate_value(self, value, context=None):
+ super(Schema, self).validate_constraints(value, context)
def __getitem__(self, key):
if key == self.TYPE:
@@ -226,7 +225,7 @@ class Parameter(object):
self.user_value = value
self.user_default = None
- def validate(self, validate_value=True, context=None, template=None):
+ def validate(self, validate_value=True, context=None):
"""Validates the parameter.
This method validates if the parameter's schema is valid,
@@ -242,9 +241,9 @@ class Parameter(object):
return
if self.user_value is not None:
- self._validate(self.user_value, context, template)
+ self._validate(self.user_value, context)
elif self.has_default():
- self._validate(self.default(), context, template)
+ self._validate(self.default(), context)
else:
raise exception.UserParameterMissing(key=self.name)
except exception.StackValidationFailed as ex:
@@ -327,12 +326,12 @@ class NumberParam(Parameter):
"""Return a float representation of the parameter."""
return float(super(NumberParam, self).value())
- def _validate(self, val, context, template=None):
+ def _validate(self, val, context):
try:
Schema.str_to_num(val)
except (ValueError, TypeError) as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
- self.schema.validate_value(val, context=context, template=template)
+ self.schema.validate_value(val, context)
def value(self):
return Schema.str_to_num(super(NumberParam, self).value())
@@ -343,12 +342,12 @@ class BooleanParam(Parameter):
__slots__ = tuple()
- def _validate(self, val, context, template=None):
+ def _validate(self, val, context):
try:
strutils.bool_from_string(val, strict=True)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
- self.schema.validate_value(val, context=context, template=template)
+ self.schema.validate_value(val, context)
def value(self):
if self.user_value is not None:
@@ -363,8 +362,8 @@ class StringParam(Parameter):
__slots__ = tuple()
- def _validate(self, val, context, template=None):
- self.schema.validate_value(val, context=context, template=template)
+ def _validate(self, val, context):
+ self.schema.validate_value(val, context=context)
def value(self):
return self.schema.to_schema_type(super(StringParam, self).value())
@@ -429,12 +428,12 @@ class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
def _value_as_text(cls, value):
return ",".join(value)
- def _validate(self, val, context, template=None):
+ def _validate(self, val, context):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
- self.schema.validate_value(parsed, context=context, template=template)
+ self.schema.validate_value(parsed, context)
class JsonParam(ParsedParameter):
@@ -478,12 +477,12 @@ class JsonParam(ParsedParameter):
def _value_as_text(cls, value):
return encodeutils.safe_decode(jsonutils.dumps(value))
- def _validate(self, val, context, template=None):
+ def _validate(self, val, context):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
- self.schema.validate_value(parsed, context=context, template=template)
+ self.schema.validate_value(parsed, context)
@six.add_metaclass(abc.ABCMeta)
@@ -536,7 +535,7 @@ class Parameters(collections.Mapping):
self._validate_user_parameters()
for param in six.itervalues(self.params):
- param.validate(validate_value, context, self.tmpl)
+ param.validate(validate_value, context)
def __contains__(self, key):
"""Return whether the specified parameter exists."""
diff --git a/heat/engine/properties.py b/heat/engine/properties.py
index 92b9fe983..92ace22cc 100644
--- a/heat/engine/properties.py
+++ b/heat/engine/properties.py
@@ -278,7 +278,7 @@ class Property(object):
if isinstance(value, (bool, int)):
value = six.text_type(value)
else:
- raise ValueError(_('Value must be a string'))
+ raise ValueError(_('Value must be a string; got %r') % value)
return value
def _get_children(self, child_values, keys=None, validate=False,
@@ -349,8 +349,7 @@ class Property(object):
raise TypeError(_('"%s" is not a valid boolean') % value)
- def get_value(self, value, validate=False, template=None,
- translation=None):
+ def get_value(self, value, validate=False, translation=None):
"""Get value from raw value and sanitize according to data type."""
t = self.type()
@@ -370,8 +369,7 @@ class Property(object):
_value = value
if validate:
- self.schema.validate_constraints(_value, self.context,
- template=template)
+ self.schema.validate_constraints(_value, self.context)
return _value
@@ -405,7 +403,7 @@ class Properties(collections.Mapping):
in params_snippet.items())
return {}
- def validate(self, with_value=True, template=None):
+ def validate(self, with_value=True):
try:
for key in self.data:
if key not in self.props:
@@ -418,9 +416,7 @@ class Properties(collections.Mapping):
continue
if with_value:
try:
- self._get_property_value(key,
- validate=True,
- template=template)
+ self._get_property_value(key, validate=True)
except exception.StackValidationFailed as ex:
path = [key]
path.extend(ex.path)
@@ -455,7 +451,7 @@ class Properties(collections.Mapping):
if any(res.action == res.INIT for res in deps):
return True
- def get_user_value(self, key, validate=False, template=None):
+ def get_user_value(self, key, validate=False):
if key not in self:
raise KeyError(_('Invalid Property %s') % key)
@@ -477,7 +473,7 @@ class Properties(collections.Mapping):
value,
self.data)
- return prop.get_value(value, validate, template=template,
+ return prop.get_value(value, validate,
translation=self.translation)
# Children can raise StackValidationFailed with unique path which
# is necessary for further use in StackValidationFailed exception.
@@ -490,23 +486,22 @@ class Properties(collections.Mapping):
except Exception as e:
raise ValueError(six.text_type(e))
- def _get_property_value(self, key, validate=False, template=None):
+ def _get_property_value(self, key, validate=False):
if key not in self:
raise KeyError(_('Invalid Property %s') % key)
prop = self.props[key]
if not self.translation.is_deleted(prop.path) and key in self.data:
- return self.get_user_value(key, validate, template=template)
+ return self.get_user_value(key, validate)
elif self.translation.has_translation(prop.path):
value = self.translation.translate(prop.path, prop_data=self.data,
- validate=validate,
- template=template)
+ validate=validate)
if value is not None or prop.has_default():
return prop.get_value(value)
elif prop.required():
raise ValueError(_('Property %s not assigned') % key)
elif prop.has_default():
- return prop.get_value(None, validate, template=template,
+ return prop.get_value(None, validate,
translation=self.translation)
elif prop.required():
raise ValueError(_('Property %s not assigned') % key)
diff --git a/heat/engine/resource.py b/heat/engine/resource.py
index ef91a71c5..1a3666e18 100644
--- a/heat/engine/resource.py
+++ b/heat/engine/resource.py
@@ -351,10 +351,28 @@ class Resource(status.ResourceStatus):
curr_stack.identifier())
curr_stack.defn = initial_stk_defn
+ res_defn = initial_stk_defn.resource_definition(db_res.name)
+ res_type = initial_stk_defn.env.registry.get_class_to_instantiate(
+ res_defn.resource_type, resource_name=db_res.name)
+
+ # If the resource type has changed and the new one is a valid
+ # substitution, use that as the class to instantiate.
+ if is_update and (latest_stk_defn is not initial_stk_defn):
+ try:
+ new_res_defn = latest_stk_defn.resource_definition(db_res.name)
+ except KeyError:
+ pass
+ else:
+ new_registry = latest_stk_defn.env.registry
+ new_res_type = new_registry.get_class_to_instantiate(
+ new_res_defn.resource_type, resource_name=db_res.name)
+
+ if res_type.check_is_substituted(new_res_type):
+ res_type = new_res_type
+
# Load only the resource in question; don't load all resources
# by invoking stack.resources. Maintain light-weight stack.
- res_defn = initial_stk_defn.resource_definition(db_res.name)
- resource = cls(db_res.name, res_defn, curr_stack)
+ resource = res_type(db_res.name, res_defn, curr_stack)
resource._load_data(db_res)
curr_stack.defn = latest_stk_defn
@@ -558,6 +576,13 @@ class Resource(status.ResourceStatus):
"""
return False
+ def get_nested_parameters_stack(self):
+ """Return the nested stack for schema validation.
+
+ Regular resources don't have such a thing.
+ """
+ return
+
def has_hook(self, hook):
# Clear the cache to make sure the data is up to date:
self._data = None
@@ -829,6 +854,22 @@ class Resource(status.ResourceStatus):
else:
self._atomic_key = last_key + 1
+ def _should_lock_on_action(self, action):
+ """Return whether we should take a resource-level lock for an action.
+
+ In the legacy path, we always took a lock at the Stack level and never
+ at the Resource level. In convergence, we lock at the Resource level
+ for most operations. However, there are currently some exceptions:
+ the SUSPEND, RESUME, SNAPSHOT, and CHECK actions, and stack abandon.
+ """
+ return (self.stack.convergence and
+ not self.abandon_in_progress and
+ action in {self.ADOPT,
+ self.CREATE,
+ self.UPDATE,
+ self.ROLLBACK,
+ self.DELETE})
+
@contextlib.contextmanager
def _action_recorder(self, action, expected_exceptions=tuple()):
"""Return a context manager to record the progress of an action.
@@ -844,8 +885,12 @@ class Resource(status.ResourceStatus):
attempts = 1
first_iter = [True] # work around no nonlocal in py27
if self.stack.convergence:
- lock_acquire = self.LOCK_ACQUIRE
- lock_release = self.LOCK_RELEASE
+ if self._should_lock_on_action(action):
+ lock_acquire = self.LOCK_ACQUIRE
+ lock_release = self.LOCK_RELEASE
+ else:
+ lock_acquire = lock_release = self.LOCK_RESPECT
+
if action != self.CREATE:
attempts += max(cfg.CONF.client_retry_limit, 0)
else:
@@ -1388,15 +1433,17 @@ class Resource(status.ResourceStatus):
self.store(lock=self.LOCK_RESPECT)
self._calling_engine_id = engine_id
+
+ # Check that the resource type matches. If the type has changed by a
+ # legitimate substitution, the load()ed resource will already be of
+ # the new type.
registry = new_stack.env.registry
new_res_def = new_stack.defn.resource_definition(self.name)
new_res_type = registry.get_class_to_instantiate(
new_res_def.resource_type, resource_name=self.name)
- restricted_actions = registry.get_rsrc_restricted_actions(
- self.name)
- is_substituted = self.check_is_substituted(new_res_type)
- if type(self) is not new_res_type and not is_substituted:
- self._check_for_convergence_replace(restricted_actions)
+ if type(self) is not new_res_type:
+ restrictions = registry.get_rsrc_restricted_actions(self.name)
+ self._check_for_convergence_replace(restrictions)
action_rollback = self.stack.action == self.stack.ROLLBACK
status_in_progress = self.stack.status == self.stack.IN_PROGRESS
@@ -1409,17 +1456,8 @@ class Resource(status.ResourceStatus):
six.text_type(failure))
raise failure
- # Use new resource as update method if existing resource
- # need to be substituted.
- if is_substituted:
- substitute = new_res_type(self.name, self.t, self.stack)
- self.stack.resources[self.name] = substitute
- substitute._calling_engine_id = engine_id
- updater = substitute.update
- else:
- updater = self.update
runner = scheduler.TaskRunner(
- updater, new_res_def,
+ self.update, new_res_def,
update_templ_func=update_templ_id_and_requires)
try:
runner(timeout=timeout, progress_callback=progress_callback)
@@ -1503,16 +1541,34 @@ class Resource(status.ResourceStatus):
"error: %s", ex)
return after_props, before_props
+ def _prepare_update_replace_handler(self, action):
+ """Return the handler method for preparing to replace a resource.
+
+ This may be either restore_prev_rsrc() (in the case of a legacy
+ rollback) or, more typically, prepare_for_replace().
+
+ If the plugin has not overridden the method, then None is returned in
+ place of the default method (which is empty anyway).
+ """
+ if (self.stack.action == 'ROLLBACK' and
+ self.stack.status == 'IN_PROGRESS' and
+ not self.stack.convergence):
+            # Handle the case where this is a rollback and we should
+            # restore the old resource.
+ if self.restore_prev_rsrc != Resource.restore_prev_rsrc:
+ return self.restore_prev_rsrc
+ else:
+ if self.prepare_for_replace != Resource.prepare_for_replace:
+ return self.prepare_for_replace
+ return None
+
def _prepare_update_replace(self, action):
+ handler = self._prepare_update_replace_handler(action)
+ if handler is None:
+ return
+
try:
- if (self.stack.action == 'ROLLBACK' and
- self.stack.status == 'IN_PROGRESS' and
- not self.stack.convergence):
- # handle case, when it's rollback and we should restore
- # old resource
- self.restore_prev_rsrc()
- else:
- self.prepare_for_replace()
+ handler()
except Exception as e:
# if any exception happen, we should set the resource to
# FAILED, then raise ResourceFailure
@@ -1571,61 +1627,65 @@ class Resource(status.ResourceStatus):
needs_update = self._needs_update(after, before,
after_props, before_props,
prev_resource)
- if not needs_update:
- if update_templ_func is not None:
- update_templ_func(persist=True)
- if self.status == self.FAILED:
- status_reason = _('Update status to COMPLETE for '
- 'FAILED resource neither update '
- 'nor replace.')
- lock = (self.LOCK_RESPECT if self.stack.convergence
- else self.LOCK_NONE)
- self.state_set(self.action, self.COMPLETE,
- status_reason, lock=lock)
- return
+ except UpdateReplace:
+ with excutils.save_and_reraise_exception():
+ if self._prepare_update_replace_handler(action) is not None:
+ with self.lock(self._calling_engine_id):
+ self._prepare_update_replace(action)
+ except exception.ResourceActionRestricted as ae:
+ failure = exception.ResourceFailure(ae, self, action)
+ self._add_event(action, self.FAILED, six.text_type(ae))
+ raise failure
+
+ if not needs_update:
+ if update_templ_func is not None:
+ update_templ_func(persist=True)
+ if self.status == self.FAILED:
+ status_reason = _('Update status to COMPLETE for '
+ 'FAILED resource neither update '
+ 'nor replace.')
+ lock = (self.LOCK_RESPECT if self.stack.convergence
+ else self.LOCK_NONE)
+ self.state_set(self.action, self.COMPLETE,
+ status_reason, lock=lock)
+ return
- if not self.stack.convergence:
- if (self.action, self.status) in (
- (self.CREATE, self.IN_PROGRESS),
- (self.UPDATE, self.IN_PROGRESS),
- (self.ADOPT, self.IN_PROGRESS)):
- exc = Exception(_('Resource update already requested'))
- raise exception.ResourceFailure(exc, self, action)
+ if not self.stack.convergence:
+ if (self.action, self.status) in (
+ (self.CREATE, self.IN_PROGRESS),
+ (self.UPDATE, self.IN_PROGRESS),
+ (self.ADOPT, self.IN_PROGRESS)):
+ exc = Exception(_('Resource update already requested'))
+ raise exception.ResourceFailure(exc, self, action)
- LOG.info('updating %s', self)
+ LOG.info('updating %s', self)
- self.updated_time = datetime.utcnow()
+ self.updated_time = datetime.utcnow()
- with self._action_recorder(action, UpdateReplace):
- after_props.validate()
+ with self._action_recorder(action, UpdateReplace):
+ after_props.validate()
+ self.properties = before_props
+ tmpl_diff = self.update_template_diff(after.freeze(), before)
- tmpl_diff = self.update_template_diff(after.freeze(), before)
+ try:
if tmpl_diff and self.needs_replace_with_tmpl_diff(tmpl_diff):
raise UpdateReplace(self)
prop_diff = self.update_template_diff_properties(after_props,
before_props)
- self.properties = before_props
-
yield self.action_handler_task(action,
args=[after, tmpl_diff,
prop_diff])
- self.t = after
- self.reparse()
- self._update_stored_properties()
- if update_templ_func is not None:
- # template/requires will be persisted by _action_recorder()
- update_templ_func(persist=False)
+ except UpdateReplace:
+ with excutils.save_and_reraise_exception():
+ self._prepare_update_replace(action)
- except exception.ResourceActionRestricted as ae:
- # catch all ResourceActionRestricted exceptions
- failure = exception.ResourceFailure(ae, self, action)
- self._add_event(action, self.FAILED, six.text_type(ae))
- raise failure
- except UpdateReplace:
- # catch all UpdateReplace exceptions
- self._prepare_update_replace(action)
- raise
+ self.t = after
+ self.reparse()
+ self._update_stored_properties()
+ if update_templ_func is not None:
+ # template/requires will be persisted by _action_recorder()
+ update_templ_func(persist=False)
yield self._break_if_required(
self.UPDATE, environment.HOOK_POST_UPDATE)
@@ -1800,19 +1860,18 @@ class Resource(status.ResourceStatus):
self.stack.context,
self.t.resource_type
)
- path = '.'.join([self.stack.t.RESOURCES, self.name])
- function.validate(self.t, path)
- self.validate_deletion_policy(self.t.deletion_policy())
- self.t.update_policy(self.update_policy_schema,
- self.context).validate()
try:
+ self.t.validate()
+ self.validate_deletion_policy(self.t.deletion_policy())
+ self.t.update_policy(self.update_policy_schema,
+ self.context).validate()
validate = self.properties.validate(
- with_value=self.stack.strict_validate,
- template=self.t)
+ with_value=self.stack.strict_validate)
except exception.StackValidationFailed as ex:
- path = [self.stack.t.RESOURCES, self.t.name,
- self.stack.t.get_section_name(ex.path[0])]
- path.extend(ex.path[1:])
+ path = [self.stack.t.RESOURCES, self.t.name]
+ if ex.path:
+ path.append(self.stack.t.get_section_name(ex.path[0]))
+ path.extend(ex.path[1:])
raise exception.StackValidationFailed(
error=ex.error,
path=path,
@@ -1821,14 +1880,15 @@ class Resource(status.ResourceStatus):
@classmethod
def validate_deletion_policy(cls, policy):
+ path = rsrc_defn.DELETION_POLICY
if policy not in rsrc_defn.ResourceDefinition.DELETION_POLICIES:
msg = _('Invalid deletion policy "%s"') % policy
- raise exception.StackValidationFailed(message=msg)
+ raise exception.StackValidationFailed(message=msg, path=path)
if policy == rsrc_defn.ResourceDefinition.SNAPSHOT:
if not callable(getattr(cls, 'handle_snapshot_delete', None)):
msg = _('"%s" deletion policy not supported') % policy
- raise exception.StackValidationFailed(message=msg)
+ raise exception.StackValidationFailed(message=msg, path=path)
def _update_replacement_data(self, template_id):
# Update the replacement resource's needed_by and replaces
@@ -2033,11 +2093,14 @@ class Resource(status.ResourceStatus):
self._rsrc_metadata = metadata
if self.id is not None:
- if (lock == self.LOCK_NONE or self._calling_engine_id is None):
+ if (lock == self.LOCK_NONE or
+ (lock in {self.LOCK_ACQUIRE, self.LOCK_RELEASE} and
+ self._calling_engine_id is None)):
resource_objects.Resource.update_by_id(
self.context, self.id, rs)
if lock != self.LOCK_NONE:
- LOG.warning("no calling_engine_id in store %s", str(rs))
+ LOG.error('No calling_engine_id in store() %s',
+ six.text_type(rs))
else:
self._store_with_lock(rs, lock)
else:
@@ -2086,15 +2149,18 @@ class Resource(status.ResourceStatus):
def lock(self, engine_id):
self._calling_engine_id = engine_id
try:
- self._store_with_lock({}, self.LOCK_ACQUIRE)
+ if engine_id is not None:
+ self._store_with_lock({}, self.LOCK_ACQUIRE)
yield
except exception.UpdateInProgress:
raise
except BaseException:
with excutils.save_and_reraise_exception():
- self._store_with_lock({}, self.LOCK_RELEASE)
+ if engine_id is not None:
+ self._store_with_lock({}, self.LOCK_RELEASE)
else:
- self._store_with_lock({}, self.LOCK_RELEASE)
+ if engine_id is not None:
+ self._store_with_lock({}, self.LOCK_RELEASE)
def _resolve_any_attribute(self, attr):
"""Method for resolving any attribute, including base attributes.
@@ -2172,6 +2238,13 @@ class Resource(status.ResourceStatus):
resource_result = {}
for key in self._update_allowed_properties:
if key in resource_data:
+ if key == 'name' and resource_properties.get(key) is None:
+                    # Some resources use `physical_resource_name` for the
+                    # name property when no name is provided at create
+                    # time, so we shouldn't add name to resource_data if
+                    # the property is None (it is most likely just such a
+                    # `physical_resource_name` case).
+ continue
resource_result[key] = resource_data.get(key)
return resource_result
@@ -2390,9 +2463,6 @@ class Resource(status.ResourceStatus):
# this is from Ceilometer.
auto = '%(previous)s to %(current)s (%(reason)s)' % details
return 'alarm state changed from %s' % auto
- elif 'state' in details:
- # this is from watchrule
- return 'alarm state changed to %(state)s' % details
return 'Unknown'
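
The resource.py changes tighten convergence locking: a resource-level lock is taken only for the mutating actions, while SUSPEND, RESUME, SNAPSHOT, CHECK, and stack abandon fall back to LOCK_RESPECT. A condensed restatement of the rule from _should_lock_on_action() (string literals stand in for the action constants):

    MUTATING = {'ADOPT', 'CREATE', 'UPDATE', 'ROLLBACK', 'DELETE'}

    def should_lock(convergence, abandon_in_progress, action):
        return (convergence and not abandon_in_progress
                and action in MUTATING)

    assert should_lock(True, False, 'UPDATE')
    assert not should_lock(True, False, 'SUSPEND')   # respect, don't acquire
    assert not should_lock(False, False, 'UPDATE')   # legacy: stack-level lock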
diff --git a/heat/engine/resources/aws/autoscaling/autoscaling_group.py b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
index e7613ecf4..481cca7cd 100644
--- a/heat/engine/resources/aws/autoscaling/autoscaling_group.py
+++ b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
@@ -223,19 +223,21 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
def check_create_complete(self, task):
"""Update cooldown timestamp after create succeeds."""
done = super(AutoScalingGroup, self).check_create_complete(task)
+ cooldown = self.properties[self.COOLDOWN]
if done:
- self._finished_scaling(
- "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
- grouputils.get_size(self)))
+ self._finished_scaling(cooldown,
+ "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
+ grouputils.get_size(self)))
return done
def check_update_complete(self, cookie):
"""Update the cooldown timestamp after update succeeds."""
done = super(AutoScalingGroup, self).check_update_complete(cookie)
+ cooldown = self.properties[self.COOLDOWN]
if done:
- self._finished_scaling(
- "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
- grouputils.get_size(self)))
+ self._finished_scaling(cooldown,
+ "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
+ grouputils.get_size(self)))
return done
def _get_new_capacity(self, capacity,
@@ -284,7 +286,7 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
def adjust(self, adjustment,
adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
- min_adjustment_step=None):
+ min_adjustment_step=None, cooldown=None):
"""Adjust the size of the scaling group if the cooldown permits."""
if self.status != self.COMPLETE:
LOG.info("%s NOT performing scaling adjustment, "
@@ -300,7 +302,10 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
"as there is no change in capacity.", self.name)
raise resource.NoActionRequired
- self._check_scaling_allowed()
+ if cooldown is None:
+ cooldown = self.properties[self.COOLDOWN]
+
+ self._check_scaling_allowed(cooldown)
# send a notification before, on-error and on-success.
notif = {
@@ -342,7 +347,8 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
"group %s.", self.name)
raise
finally:
- self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
+ self._finished_scaling(cooldown,
+ "%s : %s" % (adjustment_type, adjustment),
size_changed=size_changed)
def _tags(self):
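
adjust() now takes the effective cooldown as an argument, falling back to the group's COOLDOWN property when the caller passes nothing; this lets a scaling policy apply its own cooldown instead of the group's. A hypothetical policy-side caller (names illustrative):

    def scale_out(group, step, policy_cooldown=None):
        """Delegate to the group; adjust() falls back to the group's
        COOLDOWN property when the policy supplies none."""
        group.adjust(step,
                     adjustment_type='ChangeInCapacity',
                     cooldown=policy_cooldown)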
diff --git a/heat/engine/resources/aws/cfn/stack.py b/heat/engine/resources/aws/cfn/stack.py
index 8f348e739..4ecb1b6d3 100644
--- a/heat/engine/resources/aws/cfn/stack.py
+++ b/heat/engine/resources/aws/cfn/stack.py
@@ -85,7 +85,11 @@ class NestedStack(stack_resource.StackResource):
if key and not key.startswith('Outputs.'):
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
- attribute = self.get_output(key.partition('.')[-1])
+ try:
+ attribute = self.get_output(key.partition('.')[-1])
+ except exception.NotFound:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=key)
return attributes.select_from_attribute(attribute, path)
def get_reference_id(self):
diff --git a/heat/engine/resources/aws/ec2/eip.py b/heat/engine/resources/aws/ec2/eip.py
index 4c3f83d22..f343c6e61 100644
--- a/heat/engine/resources/aws/ec2/eip.py
+++ b/heat/engine/resources/aws/ec2/eip.py
@@ -17,6 +17,7 @@ import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
+from heat.engine.clients import client_exception
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
@@ -98,36 +99,27 @@ class ElasticIp(resource.Resource):
props = {'floating_network_id': ext_net}
ips = self.neutron().create_floatingip({
'floatingip': props})['floatingip']
- self.ipaddress = ips['floating_ip_address']
self.resource_id_set(ips['id'])
+ self.ipaddress = ips['floating_ip_address']
+
LOG.info('ElasticIp create %s', str(ips))
instance_id = self.properties[self.INSTANCE_ID]
if instance_id:
- server = self.client().servers.get(instance_id)
- server.add_floating_ip(self._ipaddress())
+ self.client_plugin().associate_floatingip(instance_id,
+ ips['id'])
def handle_delete(self):
if self.resource_id is None:
return
        # Maybe only an EIP was created, or creating the association
-        # failed when creation, there will no association, if we attempt to
+        # failed during creation; either way there is no association, and
        # attempting to disassociate would then raise an exception that we
        # need to catch and ignore before deallocating the eip
instance_id = self.properties[self.INSTANCE_ID]
if instance_id:
- try:
- server = self.client().servers.get(instance_id)
- if server:
- server.remove_floating_ip(self._ipaddress())
- except Exception as e:
- is_not_found = self.client_plugin('nova').is_not_found(e)
- is_unprocessable_entity = self.client_plugin(
- 'nova').is_unprocessable_entity(e)
-
- if (not is_not_found and not is_unprocessable_entity):
- raise
-
+ with self.client_plugin().ignore_not_found:
+ self.client_plugin().dissociate_floatingip(self.resource_id)
# deallocate the eip
with self.client_plugin('neutron').ignore_not_found:
self.neutron().delete_floatingip(self.resource_id)
@@ -135,19 +127,13 @@ class ElasticIp(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
if self.INSTANCE_ID in prop_diff:
- instance_id = prop_diff.get(self.INSTANCE_ID)
+ instance_id = prop_diff[self.INSTANCE_ID]
if instance_id:
- # no need to remove the floating ip from the old instance,
- # nova does this automatically when calling
- # add_floating_ip().
- server = self.client().servers.get(instance_id)
- server.add_floating_ip(self._ipaddress())
+ self.client_plugin().associate_floatingip(
+ instance_id, self.resource_id)
else:
- # to remove the floating_ip from the old instance
- instance_id_old = self.properties[self.INSTANCE_ID]
- if instance_id_old:
- server = self.client().servers.get(instance_id_old)
- server.remove_floating_ip(self._ipaddress())
+ self.client_plugin().dissociate_floatingip(
+ self.resource_id)
def get_reference_id(self):
eip = self._ipaddress()
@@ -251,45 +237,31 @@ class ElasticIpAssociation(resource.Resource):
allocationId,
{'floatingip': {'port_id': port_id}})
except Exception as e:
- if ignore_not_found:
- self.client_plugin('neutron').ignore_not_found(e)
- else:
+ if not (ignore_not_found and self.client_plugin(
+ 'neutron').is_not_found(e)):
raise
- def _nova_remove_floating_ip(self, instance_id, eip,
- ignore_not_found=False):
- server = None
+ def _remove_floating_ip_address(self, eip, ignore_not_found=False):
try:
- server = self.client().servers.get(instance_id)
- server.remove_floating_ip(eip)
+ self.client_plugin().dissociate_floatingip_address(eip)
except Exception as e:
- is_not_found = self.client_plugin('nova').is_not_found(e)
- iue = self.client_plugin('nova').is_unprocessable_entity(e)
- if ((not ignore_not_found and is_not_found) or
- (not is_not_found and not iue)):
+ addr_not_found = isinstance(
+ e, client_exception.EntityMatchNotFound)
+ fip_not_found = self.client_plugin().is_not_found(e)
+ not_found = addr_not_found or fip_not_found
+ if not (ignore_not_found and not_found):
raise
- return server
-
- def _floatingIp_detach(self,
- nova_ignore_not_found=False,
- neutron_ignore_not_found=False):
+ def _floatingIp_detach(self):
eip = self.properties[self.EIP]
allocation_id = self.properties[self.ALLOCATION_ID]
- instance_id = self.properties[self.INSTANCE_ID]
- server = None
if eip:
# if there is an eip_old, remove it from the instance
- server = self._nova_remove_floating_ip(instance_id,
- eip,
- nova_ignore_not_found)
+ self._remove_floating_ip_address(eip)
else:
# otherwise, update the neutron floatingIp
self._neutron_update_floating_ip(allocation_id,
- None,
- neutron_ignore_not_found)
-
- return server
+ None)
def _handle_update_eipInfo(self, prop_diff):
eip_update = prop_diff.get(self.EIP)
@@ -297,13 +269,12 @@ class ElasticIpAssociation(resource.Resource):
instance_id = self.properties[self.INSTANCE_ID]
ni_id = self.properties[self.NETWORK_INTERFACE_ID]
if eip_update:
- server = self._floatingIp_detach(neutron_ignore_not_found=True)
- if server:
- # then to attach the eip_update to the instance
- server.add_floating_ip(eip_update)
- self.resource_id_set(eip_update)
+ self._floatingIp_detach()
+ self.client_plugin().associate_floatingip_address(instance_id,
+ eip_update)
+ self.resource_id_set(eip_update)
elif allocation_id_update:
- self._floatingIp_detach(nova_ignore_not_found=True)
+ self._floatingIp_detach()
port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc:
LOG.error('Port not specified.')
@@ -323,8 +294,8 @@ class ElasticIpAssociation(resource.Resource):
# when updating portInfo, there is no need to detach the port from
# the old instance/floatingip.
if eip:
- server = self.client().servers.get(instance_id_update)
- server.add_floating_ip(eip)
+ self.client_plugin().associate_floatingip_address(
+ instance_id_update, eip)
else:
port_id, port_rsrc = self._get_port_info(ni_id_update,
instance_id_update)
@@ -339,15 +310,15 @@ class ElasticIpAssociation(resource.Resource):
def handle_create(self):
"""Add a floating IP address to a server."""
- if self.properties[self.EIP]:
- server = self.client().servers.get(
- self.properties[self.INSTANCE_ID])
- server.add_floating_ip(self.properties[self.EIP])
- self.resource_id_set(self.properties[self.EIP])
+ eip = self.properties[self.EIP]
+ if eip:
+ self.client_plugin().associate_floatingip_address(
+ self.properties[self.INSTANCE_ID], eip)
+ self.resource_id_set(eip)
LOG.debug('ElasticIpAssociation '
'%(instance)s.add_floating_ip(%(eip)s)',
{'instance': self.properties[self.INSTANCE_ID],
- 'eip': self.properties[self.EIP]})
+ 'eip': eip})
elif self.properties[self.ALLOCATION_ID]:
ni_id = self.properties[self.NETWORK_INTERFACE_ID]
instance_id = self.properties[self.INSTANCE_ID]
@@ -370,11 +341,9 @@ class ElasticIpAssociation(resource.Resource):
return
if self.properties[self.EIP]:
- instance_id = self.properties[self.INSTANCE_ID]
eip = self.properties[self.EIP]
- self._nova_remove_floating_ip(instance_id,
- eip,
- ignore_not_found=True)
+ self._remove_floating_ip_address(eip,
+ ignore_not_found=True)
elif self.properties[self.ALLOCATION_ID]:
float_id = self.properties[self.ALLOCATION_ID]
self._neutron_update_floating_ip(float_id,
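Throughout eip.py the hand-rolled exception checks give way to the client plugin's ignore_not_found context manager and to floating-IP helpers on the plugin. A minimal sketch of the context-manager pattern, assuming a plugin object that exposes is_not_found():

import contextlib


@contextlib.contextmanager
def ignore_not_found(client_plugin):
    # Suppress only errors the plugin classifies as "not found";
    # everything else propagates unchanged.
    try:
        yield
    except Exception as exc:
        if not client_plugin.is_not_found(exc):
            raise

Used as `with ignore_not_found(plugin): client.delete_floatingip(fip_id)`, this keeps delete paths idempotent without swallowing unrelated failures.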
diff --git a/heat/engine/resources/aws/lb/loadbalancer.py b/heat/engine/resources/aws/lb/loadbalancer.py
index 8ee4bbf07..439de0643 100644
--- a/heat/engine/resources/aws/lb/loadbalancer.py
+++ b/heat/engine/resources/aws/lb/loadbalancer.py
@@ -628,7 +628,11 @@ backend servers
def _resolve_attribute(self, name):
"""We don't really support any of these yet."""
if name == self.DNS_NAME:
- return self.get_output('PublicIp')
+ try:
+ return self.get_output('PublicIp')
+ except exception.NotFound:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=name)
elif name in self.attributes_schema:
# Not sure if we should return anything for the other attribs
# since they aren't really supported in any meaningful way
diff --git a/heat/engine/resources/openstack/aodh/alarm.py b/heat/engine/resources/openstack/aodh/alarm.py
index ca6bb4e4c..d9c6eada8 100644
--- a/heat/engine/resources/openstack/aodh/alarm.py
+++ b/heat/engine/resources/openstack/aodh/alarm.py
@@ -13,14 +13,12 @@
import six
-from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import alarm_base
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
-from heat.engine import watchrule
class AodhAlarm(alarm_base.BaseAlarm):
@@ -33,6 +31,13 @@ class AodhAlarm(alarm_base.BaseAlarm):
instance if the instance has been up for more than 10 min, some action will
be called.
"""
+ support_status = support.SupportStatus(
+ status=support.DEPRECATED,
+ message=_('Threshold alarm relies on ceilometer-api and has been '
+ 'deprecated in aodh since Ocata. Use '
+ 'OS::Aodh::GnocchiAggregationByResourcesAlarm instead.'),
+ version='10.0.0',
+ previous_status=support.SupportStatus(version='2014.1'))
PROPERTIES = (
COMPARISON_OPERATOR, EVALUATION_PERIODS, METER_NAME, PERIOD,
@@ -178,17 +183,6 @@ class AodhAlarm(alarm_base.BaseAlarm):
alarm = self.client().alarm.create(props)
self.resource_id_set(alarm['alarm_id'])
- # the watchrule below is for backwards compatibility.
- # 1) so we don't create watch tasks unnecessarily
- # 2) to support CW stats post, we will redirect the request
- # to ceilometer.
- wr = watchrule.WatchRule(context=self.context,
- watch_name=self.physical_resource_name(),
- rule=dict(self.properties),
- stack_id=self.stack.id)
- wr.state = wr.CEILOMETER_CONTROLLED
- wr.store()
-
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
new_props = json_snippet.properties(self.properties_schema,
@@ -209,19 +203,7 @@ class AodhAlarm(alarm_base.BaseAlarm):
return record_reality
- def handle_delete(self):
- try:
- wr = watchrule.WatchRule.load(
- self.context, watch_name=self.physical_resource_name())
- wr.destroy()
- except exception.EntityNotFound:
- pass
-
- return super(AodhAlarm, self).handle_delete()
-
def handle_check(self):
- watch_name = self.physical_resource_name()
- watchrule.WatchRule.load(self.context, watch_name=watch_name)
self.client().alarm.get(self.resource_id)
diff --git a/heat/engine/resources/openstack/heat/autoscaling_group.py b/heat/engine/resources/openstack/heat/autoscaling_group.py
index 1eb497630..a5de25527 100644
--- a/heat/engine/resources/openstack/heat/autoscaling_group.py
+++ b/heat/engine/resources/openstack/heat/autoscaling_group.py
@@ -13,6 +13,8 @@
import six
+from oslo_log import log as logging
+
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
@@ -25,6 +27,8 @@ from heat.engine.resources.aws.autoscaling import autoscaling_group as aws_asg
from heat.engine import rsrc_defn
from heat.engine import support
+LOG = logging.getLogger(__name__)
+
class HOTInterpreter(template.HOTemplate20150430):
def __new__(cls):
@@ -195,53 +199,105 @@ class AutoScalingResourceGroup(aws_asg.AutoScalingGroup):
self)._create_template(num_instances, num_replace,
template_version=template_version)
+ def _attribute_output_name(self, *attr_path):
+ return ', '.join(six.text_type(a) for a in attr_path)
+
def get_attribute(self, key, *path):
if key == self.CURRENT_SIZE:
return grouputils.get_size(self)
- if key == self.REFS:
- refs = grouputils.get_member_refids(self)
- return refs
- if key == self.REFS_MAP:
- members = grouputils.get_members(self)
- refs_map = {m.name: m.resource_id for m in members}
- return refs_map
- if path:
- members = grouputils.get_members(self)
- attrs = ((rsrc.name, rsrc.FnGetAtt(*path)) for rsrc in members)
- if key == self.OUTPUTS:
- return dict(attrs)
- if key == self.OUTPUTS_LIST:
- return [value for name, value in attrs]
- if key.startswith("resource."):
- return grouputils.get_nested_attrs(self, key, True, *path)
+ op_key = key
+ op_path = path
+ keycomponents = None
+ if key == self.OUTPUTS_LIST:
+ op_key = self.OUTPUTS
+ elif key == self.REFS:
+ op_key = self.REFS_MAP
+ elif key.startswith("resource."):
+ keycomponents = key.split('.', 2)
+ if len(keycomponents) > 2:
+ op_path = (keycomponents[2],) + path
+ op_key = self.OUTPUTS if op_path else self.REFS_MAP
+ try:
+ output = self.get_output(self._attribute_output_name(op_key,
+ *op_path))
+ except (exception.NotFound,
+ exception.TemplateOutputError) as op_err:
+ LOG.debug('Falling back to grouputils due to %s', op_err)
+
+ if key == self.REFS:
+ return grouputils.get_member_refids(self)
+ if key == self.REFS_MAP:
+ members = grouputils.get_members(self)
+ return {m.name: m.resource_id for m in members}
+ if path and key in {self.OUTPUTS, self.OUTPUTS_LIST}:
+ members = grouputils.get_members(self)
+ attrs = ((rsrc.name,
+ rsrc.FnGetAtt(*path)) for rsrc in members)
+ if key == self.OUTPUTS:
+ return dict(attrs)
+ if key == self.OUTPUTS_LIST:
+ return [value for name, value in attrs]
+ if keycomponents is not None:
+ return grouputils.get_nested_attrs(self, key, True, *path)
+ else:
+ if key in {self.REFS, self.REFS_MAP}:
+ names = self._group_data().member_names(False)
+ if key == self.REFS:
+ return [output[n] for n in names if n in output]
+ else:
+ return {n: output[n] for n in names if n in output}
+
+ if path and key in {self.OUTPUTS_LIST, self.OUTPUTS}:
+ names = self._group_data().member_names(False)
+ if key == self.OUTPUTS_LIST:
+ return [output[n] for n in names if n in output]
+ else:
+ return {n: output[n] for n in names if n in output}
+
+ if keycomponents is not None:
+ names = list(self._group_data().member_names(False))
+ index = keycomponents[1]
+ try:
+ resource_name = names[int(index)]
+ return output[resource_name]
+ except (IndexError, KeyError):
+ raise exception.NotFound(_("Member '%(mem)s' not found "
+ "in group resource '%(grp)s'.")
+ % {'mem': index,
+ 'grp': self.name})
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
- def _nested_output_defns(self, resource_names, get_attr_fn):
+ def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
if isinstance(attr, six.string_types):
key, path = attr, []
- output_name = attr
else:
key, path = attr[0], list(attr[1:])
- output_name = ', '.join(attr)
-
+ # Always use map types, as list order is not defined at
+ # template generation time.
+ if key == self.OUTPUTS_LIST:
+ key = self.OUTPUTS
+ if key == self.REFS:
+ key = self.REFS_MAP
if key.startswith("resource."):
keycomponents = key.split('.', 2)
- res_name = keycomponents[1]
- attr_name = keycomponents[2:]
- if attr_name and (res_name in resource_names):
- value = get_attr_fn([res_name] + attr_name + path)
- yield output.OutputDefinition(output_name, value)
+ path = keycomponents[2:] + path
+ if path:
+ key = self.OUTPUTS
+ else:
+ key = self.REFS_MAP
+ output_name = self._attribute_output_name(key, *path)
+ value = None
+ if key == self.REFS_MAP:
+ value = {r: get_res_fn(r) for r in resource_names}
elif key == self.OUTPUTS and path:
value = {r: get_attr_fn([r] + path) for r in resource_names}
- yield output.OutputDefinition(output_name, value)
- elif key == self.OUTPUTS_LIST and path:
- value = [get_attr_fn([r] + path) for r in resource_names]
+ if value is not None:
yield output.OutputDefinition(output_name, value)
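The group now publishes member attributes as nested-stack outputs and falls back to grouputils only when an output is missing (for example, on stacks created before this change or mid-update). The output naming convention is simply the comma-joined attribute path:

def attribute_output_name(*attr_path):
    # Mirrors _attribute_output_name() above: the attribute path is
    # flattened into a single nested-template output name.
    return ', '.join(str(a) for a in attr_path)


assert attribute_output_name('outputs', 'first_address') == \
    'outputs, first_address'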
diff --git a/heat/engine/resources/openstack/heat/cloud_watch.py b/heat/engine/resources/openstack/heat/cloud_watch.py
index 8a227a742..26ed62f13 100644
--- a/heat/engine/resources/openstack/heat/cloud_watch.py
+++ b/heat/engine/resources/openstack/heat/cloud_watch.py
@@ -13,193 +13,23 @@
from oslo_config import cfg
-from heat.common import exception
from heat.common.i18n import _
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
+from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
-from heat.engine import watchrule
-class CloudWatchAlarm(resource.Resource):
- PROPERTIES = (
- COMPARISON_OPERATOR, ALARM_DESCRIPTION, EVALUATION_PERIODS,
- METRIC_NAME, NAMESPACE, PERIOD, STATISTIC, ALARM_ACTIONS,
- OKACTIONS, DIMENSIONS, INSUFFICIENT_DATA_ACTIONS, THRESHOLD,
- UNITS,
- ) = (
- 'ComparisonOperator', 'AlarmDescription', 'EvaluationPeriods',
- 'MetricName', 'Namespace', 'Period', 'Statistic', 'AlarmActions',
- 'OKActions', 'Dimensions', 'InsufficientDataActions', 'Threshold',
- 'Units',
- )
-
- properties_schema = {
- COMPARISON_OPERATOR: properties.Schema(
- properties.Schema.STRING,
- _('Operator used to compare the specified Statistic with '
- 'Threshold.'),
- constraints=[
- constraints.AllowedValues(['GreaterThanOrEqualToThreshold',
- 'GreaterThanThreshold',
- 'LessThanThreshold',
- 'LessThanOrEqualToThreshold']),
- ],
- required=True,
- update_allowed=True
- ),
- ALARM_DESCRIPTION: properties.Schema(
- properties.Schema.STRING,
- _('Description for the alarm.'),
- update_allowed=True
- ),
- EVALUATION_PERIODS: properties.Schema(
- properties.Schema.STRING,
- _('Number of periods to evaluate over.'),
- required=True,
- update_allowed=True
- ),
- METRIC_NAME: properties.Schema(
- properties.Schema.STRING,
- _('Metric name watched by the alarm.'),
- required=True
- ),
- NAMESPACE: properties.Schema(
- properties.Schema.STRING,
- _('Namespace for the metric.'),
- required=True
- ),
- PERIOD: properties.Schema(
- properties.Schema.STRING,
- _('Period (seconds) to evaluate over.'),
- required=True,
- update_allowed=True
- ),
- STATISTIC: properties.Schema(
- properties.Schema.STRING,
- _('Metric statistic to evaluate.'),
- constraints=[
- constraints.AllowedValues(['SampleCount', 'Average', 'Sum',
- 'Minimum', 'Maximum']),
- ],
- required=True,
- update_allowed=True
- ),
- ALARM_ACTIONS: properties.Schema(
- properties.Schema.LIST,
- _('A list of actions to execute when state transitions to alarm.'),
- update_allowed=True
- ),
- OKACTIONS: properties.Schema(
- properties.Schema.LIST,
- _('A list of actions to execute when state transitions to ok.'),
- update_allowed=True
- ),
- DIMENSIONS: properties.Schema(
- properties.Schema.LIST,
- _('A list of dimensions (arbitrary name/value pairs) associated '
- 'with the metric.')
- ),
- INSUFFICIENT_DATA_ACTIONS: properties.Schema(
- properties.Schema.LIST,
- _('A list of actions to execute when state transitions to '
- 'insufficient-data.'),
- update_allowed=True
- ),
- THRESHOLD: properties.Schema(
- properties.Schema.STRING,
- _('Threshold to evaluate against.'),
- required=True,
- update_allowed=True
- ),
- UNITS: properties.Schema(
- properties.Schema.STRING,
- _('Unit for the metric.'),
- constraints=[
- constraints.AllowedValues(['Seconds', 'Microseconds',
- 'Milliseconds', 'Bytes',
- 'Kilobytes', 'Megabytes',
- 'Gigabytes', 'Terabytes', 'Bits',
- 'Kilobits', 'Megabits',
- 'Gigabits', 'Terabits', 'Percent',
- 'Count', 'Bytes/Second',
- 'Kilobytes/Second',
- 'Megabytes/Second',
- 'Gigabytes/Second',
- 'Terabytes/Second', 'Bits/Second',
- 'Kilobits/Second',
- 'Megabits/Second',
- 'Gigabits/Second',
- 'Terabits/Second', 'Count/Second',
- None]),
- ],
- update_allowed=True
- ),
- }
-
- strict_dependency = False
-
+class CloudWatchAlarm(none_resource.NoneResource):
support_status = support.SupportStatus(
status=support.HIDDEN,
- message=_('OS::Heat::CWLiteAlarm is deprecated, '
- 'use OS::Aodh::Alarm instead.'),
+ message=_('OS::Heat::CWLiteAlarm resource has been removed '
+ 'since version 10.0.0. Existing stacks can still '
+ 'use it, where it would do nothing for update/delete.'),
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
- version='2014.2'
- )
+ version='2014.2')
)
- def handle_create(self):
- wr = watchrule.WatchRule(context=self.context,
- watch_name=self.physical_resource_name(),
- rule=dict(self.properties),
- stack_id=self.stack.id)
- wr.store()
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- # If Properties has changed, update self.properties, so we
- # get the new values during any subsequent adjustment
- if prop_diff:
- self.properties = json_snippet.properties(self.properties_schema,
- self.context)
- loader = watchrule.WatchRule.load
- wr = loader(self.context,
- watch_name=self.physical_resource_name())
-
- wr.rule = dict(self.properties)
- wr.store()
-
- def handle_delete(self):
- try:
- wr = watchrule.WatchRule.load(
- self.context, watch_name=self.physical_resource_name())
- wr.destroy()
- except exception.EntityNotFound:
- pass
-
- def handle_suspend(self):
- wr = watchrule.WatchRule.load(self.context,
- watch_name=self.physical_resource_name())
- wr.state_set(wr.SUSPENDED)
-
- def handle_resume(self):
- wr = watchrule.WatchRule.load(self.context,
- watch_name=self.physical_resource_name())
- # Just set to NODATA, which will be re-evaluated next periodic task
- wr.state_set(wr.NODATA)
-
- def handle_check(self):
- watch_name = self.physical_resource_name()
- watchrule.WatchRule.load(self.context, watch_name=watch_name)
-
- def get_reference_id(self):
- return self.physical_resource_name_or_FnGetRefId()
-
- def physical_resource_name(self):
- return '%s-%s' % (self.stack.name, self.name)
-
def resource_mapping():
cfg.CONF.import_opt('enable_cloud_watch_lite', 'heat.common.config')
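With the watchrule machinery gone, CWLiteAlarm becomes a none_resource placeholder: existing stacks keep validating, while every lifecycle operation is a no-op. A sketch of what such a placeholder amounts to (behaviour assumed here; the real implementation is none_resource.NoneResource):

import uuid


class NonePlaceholder(object):
    """Illustrative no-op resource: every lifecycle step succeeds."""

    def handle_create(self):
        # Assign an opaque ID so references to the resource resolve.
        self.resource_id = str(uuid.uuid4())

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        pass

    def handle_delete(self):
        pass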
diff --git a/heat/engine/resources/openstack/heat/ha_restarter.py b/heat/engine/resources/openstack/heat/ha_restarter.py
index 438a3205b..c8f9618be 100644
--- a/heat/engine/resources/openstack/heat/ha_restarter.py
+++ b/heat/engine/resources/openstack/heat/ha_restarter.py
@@ -12,101 +12,38 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common.i18n import _
-from heat.engine import attributes
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine.resources import signal_responder
+from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
LOG = logging.getLogger(__name__)
-class Restarter(signal_responder.SignalResponder):
+class Restarter(none_resource.NoneResource):
support_status = support.SupportStatus(
- support.DEPRECATED,
- _('The HARestarter resource type is deprecated and will be removed '
- 'in a future release of Heat, once it has support for auto-healing '
- 'any type of resource. Note that HARestarter does *not* actually '
- 'restart servers - it deletes and then recreates them. It also does '
- 'the same to all dependent resources, and may therefore exhibit '
- 'unexpected and undesirable behaviour. Instead, use the '
- 'mark-unhealthy API to mark a resource as needing replacement, and '
- 'then a stack update to perform the replacement while respecting '
- 'the dependencies and not deleting them unnecessarily.'),
- version='2015.1'
- )
-
- PROPERTIES = (
- INSTANCE_ID,
- ) = (
- 'InstanceId',
- )
-
- ATTRIBUTES = (
- ALARM_URL,
- ) = (
- 'AlarmUrl',
- )
-
- properties_schema = {
- INSTANCE_ID: properties.Schema(
- properties.Schema.STRING,
- _('Instance ID to be restarted.'),
- required=True,
- constraints=[
- constraints.CustomConstraint('nova.server')
- ]
- ),
- }
-
- attributes_schema = {
- ALARM_URL: attributes.Schema(
- _("A signed url to handle the alarm (Heat extension)."),
- type=attributes.Schema.STRING
- ),
- }
-
- def handle_create(self):
- super(Restarter, self).handle_create()
- self.resource_id_set(self._get_user_id())
-
- def handle_signal(self, details=None):
- if details is None:
- alarm_state = 'alarm'
- else:
- alarm_state = details.get('state', 'alarm').lower()
-
- LOG.info('%(name)s Alarm, new state %(state)s',
- {'name': self.name, 'state': alarm_state})
-
- if alarm_state != 'alarm':
- return
-
- target_id = self.properties[self.INSTANCE_ID]
- victim = self.stack.resource_by_refid(target_id)
- if victim is None:
- LOG.info('%(name)s Alarm, can not find instance '
- '%(instance)s',
- {'name': self.name,
- 'instance': target_id})
- return
-
- LOG.info('%(name)s Alarm, restarting resource: %(victim)s',
- {'name': self.name, 'victim': victim.name})
- self.stack.restart_resource(victim.name)
-
- def _resolve_attribute(self, name):
- """Resolves the resource's attributes.
-
- Heat extension: "AlarmUrl" returns the url to post to the policy
- when there is an alarm.
- """
- if name == self.ALARM_URL and self.resource_id is not None:
- return six.text_type(self._get_ec2_signed_url())
+ status=support.HIDDEN,
+ version='10.0.0',
+ message=_('The HARestarter resource type has been removed. Existing '
+ 'stacks containing HARestarter resources can still be '
+ 'used, but the HARestarter resource will be a placeholder '
+ 'that does nothing.'),
+ previous_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ message=_('The HARestarter resource type is deprecated and will '
+ 'be removed in a future release of Heat, once it has '
+ 'support for auto-healing any type of resource. Note '
+ 'that HARestarter does *not* actually restart '
+ 'servers - it deletes and then recreates them. It also '
+ 'does the same to all dependent resources, and may '
+ 'therefore exhibit unexpected and undesirable '
+ 'behaviour. Instead, use the mark-unhealthy API to '
+ 'mark a resource as needing replacement, and then a '
+ 'stack update to perform the replacement while '
+ 'respecting the dependencies and not deleting them '
+ 'unnecessarily.'),
+ version='2015.1'))
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/instance_group.py b/heat/engine/resources/openstack/heat/instance_group.py
index 75f3c53a2..880c74bd8 100644
--- a/heat/engine/resources/openstack/heat/instance_group.py
+++ b/heat/engine/resources/openstack/heat/instance_group.py
@@ -14,7 +14,10 @@
import functools
import six
+from oslo_log import log as logging
+
from heat.common import environment_format
+from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common import short_id
@@ -30,6 +33,8 @@ from heat.scaling import lbutils
from heat.scaling import rolling_update
from heat.scaling import template
+LOG = logging.getLogger(__name__)
+
(SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',)
@@ -274,12 +279,13 @@ class InstanceGroup(stack_resource.StackResource):
child_env=child_env)
# Subclasses use HOT templates
- att_func = 'get_attr'
- if att_func not in tmpl.functions:
- att_func = 'Fn::GetAtt'
+ att_func, res_func = 'get_attr', 'get_resource'
+ if att_func not in tmpl.functions or res_func not in tmpl.functions:
+ att_func, res_func = 'Fn::GetAtt', 'Ref'
get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
+ get_res = functools.partial(tmpl.functions[res_func], None, res_func)
for odefn in self._nested_output_defns([k for k, d in definitions],
- get_attr):
+ get_attr, get_res):
tmpl.add_output(odefn)
return tmpl
@@ -390,6 +396,13 @@ class InstanceGroup(stack_resource.StackResource):
def get_reference_id(self):
return self.physical_resource_name_or_FnGetRefId()
+ def _group_data(self, refresh=False):
+ """Return a cached GroupInspector object for the nested stack."""
+ if refresh or getattr(self, '_group_inspector', None) is None:
+ inspector = grouputils.GroupInspector.from_parent_resource(self)
+ self._group_inspector = inspector
+ return self._group_inspector
+
def _resolve_attribute(self, name):
"""Resolves the resource's attributes.
@@ -397,10 +410,26 @@ class InstanceGroup(stack_resource.StackResource):
ip addresses.
"""
if name == self.INSTANCE_LIST:
- return u','.join(inst.FnGetAtt('PublicIp') or '0.0.0.0'
- for inst in grouputils.get_members(self)) or None
+ def listify(ips):
+ return u','.join(ips) or None
+
+ try:
+ output = self.get_output(name)
+ except (exception.NotFound,
+ exception.TemplateOutputError) as op_err:
+ LOG.debug('Falling back to grouputils due to %s', op_err)
+ else:
+ if isinstance(output, dict):
+ names = self._group_data().member_names(False)
+ return listify(output[n] for n in names if n in output)
+ else:
+ LOG.debug('Falling back to grouputils due to '
+ 'old (list-style) output format')
+
+ return listify(inst.FnGetAtt('PublicIp') or '0.0.0.0'
+ for inst in grouputils.get_members(self))
- def _nested_output_defns(self, resource_names, get_attr_fn):
+ def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
if isinstance(attr, six.string_types):
key = attr
@@ -408,7 +437,8 @@ class InstanceGroup(stack_resource.StackResource):
key = attr[0]
if key == self.INSTANCE_LIST:
- value = [get_attr_fn([r, 'PublicIp']) for r in resource_names]
+ value = {r: get_attr_fn([r, 'PublicIp'])
+ for r in resource_names}
yield output.OutputDefinition(key, value)
def child_template(self):
@@ -424,6 +454,13 @@ class InstanceGroup(stack_resource.StackResource):
},
}
+ def get_nested_parameters_stack(self):
+ """Return a nested group of size 1 for validation."""
+ child_template = self._create_template(1)
+ params = self.child_params()
+ name = "%s-%s" % (self.stack.name, self.name)
+ return self._parse_nested_stack(name, child_template, params)
+
def resource_mapping():
return {
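The InstanceList attribute now prefers the nested stack's map-style output, keeping grouputils and the old list-style output as fallbacks. A condensed sketch of the decision ladder with stand-in data:

def instance_list(output, member_names):
    # `output` is the nested stack's InstanceList output: a map of
    # member name -> PublicIp in the new format, or a list (old
    # format) / None (missing), in which case the caller falls back
    # to grouputils.
    if isinstance(output, dict):
        ips = [output[n] for n in member_names if n in output]
        return u','.join(ips) or None
    return None


assert instance_list({'a': '10.0.0.1', 'b': '10.0.0.2'},
                     ['a', 'b']) == u'10.0.0.1,10.0.0.2'
assert instance_list(['10.0.0.1'], ['a']) is None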
diff --git a/heat/engine/resources/openstack/heat/random_string.py b/heat/engine/resources/openstack/heat/random_string.py
index d4758531e..9052b5062 100644
--- a/heat/engine/resources/openstack/heat/random_string.py
+++ b/heat/engine/resources/openstack/heat/random_string.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import random
+import random as random_module
import string
import six
@@ -25,6 +25,10 @@ from heat.engine import resource
from heat.engine import support
from heat.engine import translation
+# NOTE(pas-ha) Heat officially supports only POSIX::Linux platform
+# where os.urandom() and random.SystemRandom() are available
+random = random_module.SystemRandom()
+
class RandomString(resource.Resource):
"""A resource which generates a random string.
diff --git a/heat/engine/resources/openstack/heat/resource_chain.py b/heat/engine/resources/openstack/heat/resource_chain.py
index 0d3973348..317148a80 100644
--- a/heat/engine/resources/openstack/heat/resource_chain.py
+++ b/heat/engine/resources/openstack/heat/resource_chain.py
@@ -14,6 +14,8 @@
import functools
import six
+from oslo_log import log as logging
+
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
@@ -25,6 +27,8 @@ from heat.engine import rsrc_defn
from heat.engine import support
from heat.scaling import template as scl_template
+LOG = logging.getLogger(__name__)
+
class ResourceChain(stack_resource.StackResource):
"""Creates one or more resources with the same configuration.
@@ -130,8 +134,11 @@ class ResourceChain(stack_resource.StackResource):
att_func = 'get_attr'
get_attr = functools.partial(nested_template.functions[att_func],
None, att_func)
+ res_func = 'get_resource'
+ get_res = functools.partial(nested_template.functions[res_func],
+ None, res_func)
res_names = [k for k, d in name_def_tuples]
- for odefn in self._nested_output_defns(res_names, get_attr):
+ for odefn in self._nested_output_defns(res_names, get_attr, get_res):
nested_template.add_output(odefn)
return nested_template
@@ -139,19 +146,41 @@ class ResourceChain(stack_resource.StackResource):
def child_params(self):
return {}
+ def _attribute_output_name(self, *attr_path):
+ return ', '.join(six.text_type(a) for a in attr_path)
+
def get_attribute(self, key, *path):
+ if key == self.ATTR_ATTRIBUTES and not path:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=key)
+
+ try:
+ output = self.get_output(self._attribute_output_name(key, *path))
+ except (exception.NotFound,
+ exception.TemplateOutputError) as op_err:
+ resource_types = self.properties[self.RESOURCES]
+ names = self._resource_names(resource_types)
+ if key.startswith('resource.'):
+ target = key.split('.', 2)[1]
+ if target not in names:
+ raise exception.NotFound(_("Member '%(mem)s' not "
+ "found in group resource "
+ "'%(grp)s'.") %
+ {'mem': target,
+ 'grp': self.name})
+ LOG.debug('Falling back to grouputils due to %s', op_err)
+ else:
+ if key == self.REFS:
+ return attributes.select_from_attribute(output, path)
+ return output
+
if key.startswith('resource.'):
return grouputils.get_nested_attrs(self, key, False, *path)
- resource_types = self.properties[self.RESOURCES]
- names = self._resource_names(resource_types)
if key == self.REFS:
vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
return attributes.select_from_attribute(vals, path)
if key == self.ATTR_ATTRIBUTES:
- if not path:
- raise exception.InvalidTemplateAttribute(
- resource=self.name, key=key)
return dict((n, grouputils.get_rsrc_attr(
self, key, False, n, *path)) for n in names)
@@ -159,29 +188,32 @@ class ResourceChain(stack_resource.StackResource):
return [grouputils.get_rsrc_attr(self, key, False, n, *path)
for n in names]
- def _nested_output_defns(self, resource_names, get_attr_fn):
+ def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
if isinstance(attr, six.string_types):
key, path = attr, []
- output_name = attr
else:
key, path = attr[0], list(attr[1:])
- output_name = ', '.join(attr)
+ output_name = self._attribute_output_name(key, *path)
+ value = None
if key.startswith("resource."):
keycomponents = key.split('.', 2)
res_name = keycomponents[1]
- attr_name = keycomponents[2:]
- if attr_name and (res_name in resource_names):
- value = get_attr_fn([res_name] + attr_name + path)
- yield output.OutputDefinition(output_name, value)
-
+ attr_path = keycomponents[2:] + path
+ if res_name in resource_names:
+ if attr_path:
+ value = get_attr_fn([res_name] + attr_path)
+ else:
+ value = get_res_fn(res_name)
+ elif key == self.REFS:
+ value = [get_res_fn(r) for r in resource_names]
elif key == self.ATTR_ATTRIBUTES and path:
value = {r: get_attr_fn([r] + path) for r in resource_names}
- yield output.OutputDefinition(output_name, value)
-
elif key not in self.ATTRIBUTES:
value = [get_attr_fn([r, key] + path) for r in resource_names]
+
+ if value is not None:
yield output.OutputDefinition(output_name, value)
@staticmethod
diff --git a/heat/engine/resources/openstack/heat/resource_group.py b/heat/engine/resources/openstack/heat/resource_group.py
index 335139527..24f37e7d4 100644
--- a/heat/engine/resources/openstack/heat/resource_group.py
+++ b/heat/engine/resources/openstack/heat/resource_group.py
@@ -15,9 +15,10 @@ import collections
import copy
import functools
import itertools
-
import six
+from oslo_log import log as logging
+
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
@@ -34,6 +35,8 @@ from heat.engine import support
from heat.scaling import rolling_update
from heat.scaling import template as scl_template
+LOG = logging.getLogger(__name__)
+
class ResourceGroup(stack_resource.StackResource):
"""Creates one or more identically configured nested resources.
@@ -76,8 +79,10 @@ class ResourceGroup(stack_resource.StackResource):
PROPERTIES = (
COUNT, INDEX_VAR, RESOURCE_DEF, REMOVAL_POLICIES,
+ REMOVAL_POLICIES_MODE,
) = (
'count', 'index_var', 'resource_def', 'removal_policies',
+ 'removal_policies_mode'
)
_RESOURCE_DEF_KEYS = (
@@ -92,6 +97,12 @@ class ResourceGroup(stack_resource.StackResource):
'resource_list',
)
+ _REMOVAL_POLICY_MODES = (
+ REMOVAL_POLICY_APPEND, REMOVAL_POLICY_UPDATE
+ ) = (
+ 'append', 'update'
+ )
+
_ROLLING_UPDATES_SCHEMA_KEYS = (
MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME,
) = (
@@ -195,6 +206,18 @@ class ResourceGroup(stack_resource.StackResource):
default=[],
support_status=support.SupportStatus(version='2015.1')
),
+ REMOVAL_POLICIES_MODE: properties.Schema(
+ properties.Schema.STRING,
+ _('How to handle changes to removal_policies on update. '
+ 'The default "append" mode appends new entries to the internal '
+ 'list, while "update" replaces it.'),
+ default=REMOVAL_POLICY_APPEND,
+ constraints=[
+ constraints.AllowedValues(_REMOVAL_POLICY_MODES)
+ ],
+ update_allowed=True,
+ support_status=support.SupportStatus(version='10.0.0')
+ ),
}
attributes_schema = {
@@ -280,7 +303,7 @@ class ResourceGroup(stack_resource.StackResource):
if not self.get_size():
return
- first_name = next(self._resource_names(update_rsrc_data=False))
+ first_name = next(self._resource_names())
test_tmpl = self._assemble_nested([first_name],
include_all=True)
res_def = next(six.itervalues(test_tmpl.resource_definitions(None)))
@@ -307,38 +330,69 @@ class ResourceGroup(stack_resource.StackResource):
else:
return []
- def _name_blacklist(self, update_rsrc_data=True):
- """Resolve the remove_policies to names for removal."""
-
- nested = self.nested()
-
- # To avoid reusing names after removal, we store a comma-separated
- # blacklist in the resource data
- current_blacklist = self._current_blacklist()
+ def _get_new_blacklist_entries(self, properties, current_blacklist):
+ insp = grouputils.GroupInspector.from_parent_resource(self)
# Now we iterate over the removal policies, and update the blacklist
# with any additional names
- rsrc_names = set(current_blacklist)
-
- for r in self.properties[self.REMOVAL_POLICIES]:
+ for r in properties.get(self.REMOVAL_POLICIES, []):
if self.REMOVAL_RSRC_LIST in r:
# Tolerate string or int list values
for n in r[self.REMOVAL_RSRC_LIST]:
str_n = six.text_type(n)
- if not nested or str_n in nested:
- rsrc_names.add(str_n)
- continue
- rsrc = nested.resource_by_refid(str_n)
- if rsrc:
- rsrc_names.add(rsrc.name)
+ if (str_n in current_blacklist or
+ self.resource_id is None or
+ str_n in insp.member_names(include_failed=True)):
+ yield str_n
+ elif isinstance(n, six.string_types):
+ try:
+ refids = self.get_output(self.REFS_MAP)
+ except (exception.NotFound,
+ exception.TemplateOutputError) as op_err:
+ LOG.debug('Falling back to resource_by_refid() '
+ 'due to %s', op_err)
+ rsrc = self.nested().resource_by_refid(n)
+ if rsrc is not None:
+ yield rsrc.name
+ else:
+ if refids is not None:
+ for name, refid in refids.items():
+ if refid == n:
+ yield name
+ break
+
+ # Clear output cache from prior to stack update, so we don't get
+ # outdated values after stack update.
+ self._outputs = None
+
+ def _update_name_blacklist(self, properties):
+ """Resolve the remove_policies to names for removal."""
+ # To avoid reusing names after removal, we store a comma-separated
+ # blacklist in the resource data - in cases where you want to
+ # overwrite the stored data, removal_policies_mode: update can be used
+ curr_bl = set(self._current_blacklist())
+ p_mode = properties.get(self.REMOVAL_POLICIES_MODE,
+ self.REMOVAL_POLICY_APPEND)
+ if p_mode == self.REMOVAL_POLICY_UPDATE:
+ init_bl = set()
+ else:
+ init_bl = curr_bl
+ updated_bl = init_bl | set(self._get_new_blacklist_entries(properties,
+ curr_bl))
# If the blacklist has changed, update the resource data
- if update_rsrc_data and rsrc_names != set(current_blacklist):
- self.data_set('name_blacklist', ','.join(rsrc_names))
- return rsrc_names
+ if updated_bl != curr_bl:
+ self.data_set('name_blacklist', ','.join(sorted(updated_bl)))
+
+ def _name_blacklist(self):
+ """Get the list of resource names to blacklist."""
+ bl = set(self._current_blacklist())
+ if self.resource_id is None:
+ bl |= set(self._get_new_blacklist_entries(self.properties, bl))
+ return bl
- def _resource_names(self, size=None, update_rsrc_data=True):
- name_blacklist = self._name_blacklist(update_rsrc_data)
+ def _resource_names(self, size=None):
+ name_blacklist = self._name_blacklist()
if size is None:
size = self.get_size()
@@ -351,13 +405,13 @@ class ResourceGroup(stack_resource.StackResource):
candidates),
size)
- def _count_black_listed(self):
+ def _count_black_listed(self, existing_members):
"""Return the number of current resource names that are blacklisted."""
- existing_members = grouputils.get_member_names(self)
return len(self._name_blacklist() & set(existing_members))
def handle_create(self):
- if self.update_policy.get(self.BATCH_CREATE):
+ self._update_name_blacklist(self.properties)
+ if self.update_policy.get(self.BATCH_CREATE) and self.get_size():
batch_create = self.update_policy[self.BATCH_CREATE]
max_batch_size = batch_create[self.MAX_BATCH_SIZE]
pause_sec = batch_create[self.PAUSE_TIME]
@@ -415,6 +469,7 @@ class ResourceGroup(stack_resource.StackResource):
checkers = []
self.properties = json_snippet.properties(self.properties_schema,
self.context)
+ self._update_name_blacklist(self.properties)
if prop_diff and self.res_def_changed(prop_diff):
updaters = self._try_rolling_update()
if updaters:
@@ -430,7 +485,46 @@ class ResourceGroup(stack_resource.StackResource):
checkers[0].start()
return checkers
+ def _attribute_output_name(self, *attr_path):
+ if attr_path[0] == self.REFS:
+ return self.REFS
+ return ', '.join(six.text_type(a) for a in attr_path)
+
def get_attribute(self, key, *path):
+ if key == self.REMOVED_RSRC_LIST:
+ return self._current_blacklist()
+ if key == self.ATTR_ATTRIBUTES and not path:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=key)
+
+ is_resource_ref = (key.startswith("resource.") and
+ not path and (len(key.split('.', 2)) == 2))
+ if is_resource_ref:
+ output_name = self.REFS_MAP
+ else:
+ output_name = self._attribute_output_name(key, *path)
+
+ if self.resource_id is not None:
+ try:
+ output = self.get_output(output_name)
+ except (exception.NotFound,
+ exception.TemplateOutputError) as op_err:
+ LOG.debug('Falling back to grouputils due to %s', op_err)
+ else:
+ if is_resource_ref:
+ try:
+ target = key.split('.', 2)[1]
+ return output[target]
+ except KeyError:
+ raise exception.NotFound(_("Member '%(mem)s' not "
+ "found in group resource "
+ "'%(grp)s'.") %
+ {'mem': target,
+ 'grp': self.name})
+ if key == self.REFS:
+ return attributes.select_from_attribute(output, path)
+ return output
+
if key.startswith("resource."):
return grouputils.get_nested_attrs(self, key, False, *path)
@@ -442,12 +536,7 @@ class ResourceGroup(stack_resource.StackResource):
refs_map = {n: grouputils.get_rsrc_id(self, key, False, n)
for n in names}
return refs_map
- if key == self.REMOVED_RSRC_LIST:
- return self._current_blacklist()
if key == self.ATTR_ATTRIBUTES:
- if not path:
- raise exception.InvalidTemplateAttribute(
- resource=self.name, key=key)
return dict((n, grouputils.get_rsrc_attr(
self, key, False, n, *path)) for n in names)
@@ -455,31 +544,38 @@ class ResourceGroup(stack_resource.StackResource):
return [grouputils.get_rsrc_attr(self, key, False, n, *path)
for n in names]
- def _nested_output_defns(self, resource_names, get_attr_fn):
+ def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
if isinstance(attr, six.string_types):
key, path = attr, []
- output_name = attr
else:
key, path = attr[0], list(attr[1:])
- output_name = ', '.join(attr)
+ output_name = self._attribute_output_name(key, *path)
+ value = None
if key.startswith("resource."):
keycomponents = key.split('.', 2)
res_name = keycomponents[1]
- attr_name = keycomponents[2:]
- if attr_name and (res_name in resource_names):
- value = get_attr_fn([res_name] + attr_name + path)
- yield output.OutputDefinition(output_name, value)
-
+ attr_path = keycomponents[2:] + path
+ if attr_path:
+ if res_name in resource_names:
+ value = get_attr_fn([res_name] + attr_path)
+ else:
+ output_name = key = self.REFS_MAP
elif key == self.ATTR_ATTRIBUTES and path:
value = {r: get_attr_fn([r] + path) for r in resource_names}
- yield output.OutputDefinition(output_name, value)
-
elif key not in self.ATTRIBUTES:
value = [get_attr_fn([r, key] + path) for r in resource_names]
+
+ if key == self.REFS:
+ value = [get_res_fn(r) for r in resource_names]
+
+ if value is not None:
yield output.OutputDefinition(output_name, value)
+ value = {r: get_res_fn(r) for r in resource_names}
+ yield output.OutputDefinition(self.REFS_MAP, value)
+
def build_resource_definition(self, res_name, res_defn):
res_def = copy.deepcopy(res_defn)
@@ -551,6 +647,15 @@ class ResourceGroup(stack_resource.StackResource):
return [recurse(v) for v in val]
return val
+ def _add_output_defns_to_template(self, tmpl, resource_names):
+ att_func = 'get_attr'
+ get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
+ res_func = 'get_resource'
+ get_res = functools.partial(tmpl.functions[res_func], None, res_func)
+ for odefn in self._nested_output_defns(resource_names,
+ get_attr, get_res):
+ tmpl.add_output(odefn)
+
def _assemble_nested(self, names, include_all=False,
template_version=('heat_template_version',
'2015-04-30')):
@@ -560,13 +665,7 @@ class ResourceGroup(stack_resource.StackResource):
for k in names]
tmpl = scl_template.make_template(definitions,
version=template_version)
-
- att_func = 'get_attr'
- get_attr = functools.partial(tmpl.functions[att_func], None, att_func)
- for odefn in self._nested_output_defns([k for k, d in definitions],
- get_attr):
- tmpl.add_output(odefn)
-
+ self._add_output_defns_to_template(tmpl, [k for k, d in definitions])
return tmpl
def _assemble_for_rolling_update(self, total_capacity, max_updates,
@@ -608,8 +707,10 @@ class ResourceGroup(stack_resource.StackResource):
max_updates,
lambda: next(new_names),
self.build_resource_definition)
- return scl_template.make_template(definitions,
+ tmpl = scl_template.make_template(definitions,
version=template_version)
+ self._add_output_defns_to_template(tmpl, names)
+ return tmpl
def _try_rolling_update(self):
if self.update_policy[self.ROLLING_UPDATE]:
@@ -653,11 +754,12 @@ class ResourceGroup(stack_resource.StackResource):
while not duration.expired():
yield
- # blacklist count existing
- num_blacklist = self._count_black_listed()
-
# current capacity not including existing blacklisted
- curr_cap = len(self.nested()) - num_blacklist if self.nested() else 0
+ inspector = grouputils.GroupInspector.from_parent_resource(self)
+ num_blacklist = self._count_black_listed(
+ inspector.member_names(include_failed=False))
+ num_resources = inspector.size(include_failed=True)
+ curr_cap = num_resources - num_blacklist
batches = list(self._get_batches(self.get_size(), curr_cap, batch_size,
min_in_service))
@@ -688,6 +790,14 @@ class ResourceGroup(stack_resource.StackResource):
{},
adopt_data=resource_data)
+ def get_nested_parameters_stack(self):
+ """Return a nested group of size 1 for validation."""
+ names = self._resource_names(1)
+ child_template = self._assemble_nested(names)
+ params = self.child_params()
+ name = "%s-%s" % (self.stack.name, self.name)
+ return self._parse_nested_stack(name, child_template, params)
+
def resource_mapping():
return {
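removal_policies_mode controls whether new removal policies extend or replace the stored name blacklist. The set arithmetic at the heart of _update_name_blacklist(), reduced to a sketch:

def updated_blacklist(stored, new_entries, mode='append'):
    # "append" (the default) unions new entries into the stored
    # blacklist; "update" discards the stored entries first.
    base = set() if mode == 'update' else set(stored)
    return base | set(new_entries)


assert updated_blacklist({'r0'}, {'r1'}) == {'r0', 'r1'}
assert updated_blacklist({'r0'}, {'r1'}, mode='update') == {'r1'}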
diff --git a/heat/engine/resources/openstack/heat/scaling_policy.py b/heat/engine/resources/openstack/heat/scaling_policy.py
index 90738f688..c0afc212e 100644
--- a/heat/engine/resources/openstack/heat/scaling_policy.py
+++ b/heat/engine/resources/openstack/heat/scaling_policy.py
@@ -19,17 +19,14 @@ from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
-from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine import support
-from heat.scaling import cooldown
from heat.scaling import scalingutil as sc_util
LOG = logging.getLogger(__name__)
-class AutoScalingPolicy(cooldown.CooldownMixin,
- signal_responder.SignalResponder):
+class AutoScalingPolicy(signal_responder.SignalResponder):
"""A resource to manage scaling of `OS::Heat::AutoScalingGroup`.
**Note** while it may incidentally support
@@ -172,34 +169,18 @@ class AutoScalingPolicy(cooldown.CooldownMixin,
) % {'alarm': self.name,
'group': asgn_id})
- self._check_scaling_allowed()
-
LOG.info('%(name)s alarm, adjusting group %(group)s with id '
'%(asgn_id)s by %(filter)s',
{'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
- size_changed = False
- try:
- with group.frozen_properties():
- group.adjust(
- self.properties[self.SCALING_ADJUSTMENT],
- self.properties[self.ADJUSTMENT_TYPE],
- self.properties[self.MIN_ADJUSTMENT_STEP])
- size_changed = True
- except resource.NoActionRequired:
- raise
- except Exception:
- LOG.error("Error in performing scaling adjustment with "
- "%(name)s alarm for group %(group)s.",
- {'name': self.name, 'group': group.name})
- raise
- finally:
- self._finished_scaling("%s : %s" % (
+ with group.frozen_properties():
+ group.adjust(
+ self.properties[self.SCALING_ADJUSTMENT],
self.properties[self.ADJUSTMENT_TYPE],
- self.properties[self.SCALING_ADJUSTMENT]),
- size_changed=size_changed)
+ self.properties[self.MIN_ADJUSTMENT_STEP],
+ self.properties[self.COOLDOWN])
def _resolve_attribute(self, name):
if self.resource_id is None:
diff --git a/heat/engine/resources/openstack/heat/software_deployment.py b/heat/engine/resources/openstack/heat/software_deployment.py
index 56d6c72d1..ba9187288 100644
--- a/heat/engine/resources/openstack/heat/software_deployment.py
+++ b/heat/engine/resources/openstack/heat/software_deployment.py
@@ -12,17 +12,19 @@
# under the License.
import copy
+import six
+from six import itertools
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
-from six import itertools
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
+from heat.engine import output
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import resource_group
@@ -699,6 +701,9 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
def res_def_changed(self, prop_diff):
return True
+ def _update_name_blacklist(self, properties):
+ pass
+
def _name_blacklist(self):
return set()
@@ -713,8 +718,7 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
'OS::Heat::SoftwareDeployment',
props, None)
- def get_attribute(self, key, *path):
- rg = super(SoftwareDeploymentGroup, self)
+ def _member_attribute_name(self, key):
if key == self.STDOUTS:
n_attr = SoftwareDeployment.STDOUT
elif key == self.STDERRS:
@@ -725,10 +729,25 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
# Allow any attribute valid for a single SoftwareDeployment
# including arbitrary outputs, so we can't validate here
n_attr = key
+ return n_attr
+
+ def get_attribute(self, key, *path):
+ rg = super(SoftwareDeploymentGroup, self)
+ n_attr = self._member_attribute_name(key)
rg_attr = rg.get_attribute(rg.ATTR_ATTRIBUTES, n_attr)
return attributes.select_from_attribute(rg_attr, path)
+ def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
+ for attr in self.referenced_attrs():
+ key = attr if isinstance(attr, six.string_types) else attr[0]
+ n_attr = self._member_attribute_name(key)
+ output_name = self._attribute_output_name(self.ATTR_ATTRIBUTES,
+ n_attr)
+ value = {r: get_attr_fn([r, n_attr])
+ for r in resource_names}
+ yield output.OutputDefinition(output_name, value)
+
def _try_rolling_update(self):
if self.update_policy[self.ROLLING_UPDATE]:
policy = self.update_policy[self.ROLLING_UPDATE]
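Factoring out _member_attribute_name() lets get_attribute() and the generated nested-stack outputs share the same group-to-member mapping. Reduced to its essentials (the literal attribute names below are assumptions for illustration, not quoted from the class constants):

def member_attribute_name(key):
    # Plural group-level attributes map to the singular per-member
    # attribute; anything else passes through unchanged so arbitrary
    # deployment outputs keep working.
    return {'deploy_stdouts': 'deploy_stdout',
            'deploy_stderrs': 'deploy_stderr'}.get(key, key)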
diff --git a/heat/engine/resources/openstack/keystone/project.py b/heat/engine/resources/openstack/keystone/project.py
index 221b2c578..2fdecf19d 100644
--- a/heat/engine/resources/openstack/keystone/project.py
+++ b/heat/engine/resources/openstack/keystone/project.py
@@ -39,9 +39,9 @@ class KeystoneProject(resource.Resource):
entity = 'projects'
PROPERTIES = (
- NAME, DOMAIN, DESCRIPTION, ENABLED, PARENT,
+ NAME, DOMAIN, DESCRIPTION, ENABLED, PARENT, TAGS,
) = (
- 'name', 'domain', 'description', 'enabled', 'parent',
+ 'name', 'domain', 'description', 'enabled', 'parent', 'tags',
)
properties_schema = {
@@ -76,6 +76,13 @@ class KeystoneProject(resource.Resource):
support_status=support.SupportStatus(version='6.0.0'),
constraints=[constraints.CustomConstraint('keystone.project')]
),
+ TAGS: properties.Schema(
+ properties.Schema.LIST,
+ _('A list of tags for labeling and sorting projects.'),
+ support_status=support.SupportStatus(version='10.0.0'),
+ default=[],
+ update_allowed=True
+ ),
}
ATTRIBUTES = (
@@ -145,13 +152,15 @@ class KeystoneProject(resource.Resource):
domain = self.properties[self.DOMAIN]
enabled = self.properties[self.ENABLED]
parent = self.properties[self.PARENT]
+ tags = self.properties[self.TAGS]
project = self.client().projects.create(
name=project_name,
domain=domain,
description=description,
enabled=enabled,
- parent=parent)
+ parent=parent,
+ tags=tags)
self.resource_id_set(project.id)
@@ -165,13 +174,16 @@ class KeystoneProject(resource.Resource):
description = prop_diff.get(self.DESCRIPTION)
enabled = prop_diff.get(self.ENABLED)
domain = prop_diff.get(self.DOMAIN, self.properties[self.DOMAIN])
+ tags = (prop_diff.get(self.TAGS) or
+ self.properties[self.TAGS])
self.client().projects.update(
project=self.resource_id,
name=name,
description=description,
enabled=enabled,
- domain=domain
+ domain=domain,
+ tags=tags
)
def parse_live_resource_data(self, resource_properties, resource_data):
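One subtlety of the update path above: because of the `or`, a tags value present in prop_diff wins, but an explicitly empty list also falls back to the stored tags. A sketch of that behaviour:

def tags_for_update(prop_diff, current_tags):
    # Mirrors `prop_diff.get(self.TAGS) or self.properties[self.TAGS]`.
    return prop_diff.get('tags') or current_tags


assert tags_for_update({'tags': ['a']}, ['b']) == ['a']
assert tags_for_update({}, ['b']) == ['b']
assert tags_for_update({'tags': []}, ['b']) == ['b']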
diff --git a/heat/engine/resources/openstack/magnum/cluster_template.py b/heat/engine/resources/openstack/magnum/cluster_template.py
index 8cdcd4102..d394492b3 100644
--- a/heat/engine/resources/openstack/magnum/cluster_template.py
+++ b/heat/engine/resources/openstack/magnum/cluster_template.py
@@ -19,6 +19,7 @@ from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
+from heat.engine import translation
class ClusterTemplate(resource.Resource):
@@ -40,7 +41,7 @@ class ClusterTemplate(resource.Resource):
DOCKER_VOLUME_SIZE, DOCKER_STORAGE_DRIVER, COE,
NETWORK_DRIVER, VOLUME_DRIVER, HTTP_PROXY, HTTPS_PROXY,
NO_PROXY, LABELS, TLS_DISABLED, PUBLIC, REGISTRY_ENABLED,
- SERVER_TYPE, MASTER_LB_ENABLED, FLOATING_IP_ENABLED
+ SERVER_TYPE, MASTER_LB_ENABLED, FLOATING_IP_ENABLED
) = (
'name', 'image', 'flavor', 'master_flavor', 'keypair',
'external_network', 'fixed_network', 'fixed_subnet', 'dns_nameserver',
@@ -93,7 +94,8 @@ class ClusterTemplate(resource.Resource):
),
EXTERNAL_NETWORK: properties.Schema(
properties.Schema.STRING,
- _('The external neutron network to attach the Cluster.'),
+ _('The name or UUID of the external neutron network to attach the '
+ 'Cluster to.'),
constraints=[
constraints.CustomConstraint('neutron.network')
],
@@ -101,14 +103,16 @@ class ClusterTemplate(resource.Resource):
),
FIXED_NETWORK: properties.Schema(
properties.Schema.STRING,
- _('The fixed neutron network to attach the Cluster.'),
+ _('The name or UUID of the fixed neutron network to attach the '
+ 'Cluster to.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
FIXED_SUBNET: properties.Schema(
properties.Schema.STRING,
- _('The fixed neutron subnet to attach the Cluster.'),
+ _('The name or UUID of the fixed neutron subnet to attach the '
+ 'Cluster to.'),
constraints=[
constraints.CustomConstraint('neutron.subnet')
]
@@ -214,6 +218,34 @@ class ClusterTemplate(resource.Resource):
),
}
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.EXTERNAL_NETWORK],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='network'
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.FIXED_NETWORK],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='network'
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.FIXED_SUBNET],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='subnet'
+ )
+ ]
+
def validate(self):
"""Validate the provided params."""
super(ClusterTemplate, self).validate()
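Each RESOLVE rule canonicalises a user-supplied name into an ID before the property is consumed, so magnum always receives UUIDs. Roughly what one rule does, as a hedged sketch built on the finder named in the rules above:

def resolve_to_id(neutron_plugin, props, key, entity):
    # Hypothetical helper mirroring one RESOLVE rule: replace a
    # name-or-id property value with the canonical UUID.
    if props.get(key):
        props[key] = neutron_plugin.find_resourceid_by_name_or_id(
            entity, props[key])
    return props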
diff --git a/heat/engine/resources/openstack/mistral/external_resource.py b/heat/engine/resources/openstack/mistral/external_resource.py
index a55b354ce..58142c581 100644
--- a/heat/engine/resources/openstack/mistral/external_resource.py
+++ b/heat/engine/resources/openstack/mistral/external_resource.py
@@ -188,8 +188,8 @@ class MistralExternalResource(resource.Resource):
inputs = self.properties[self.INPUT]
execution = self.client().executions.create(
action_data[self.WORKFLOW],
- jsonutils.dumps(inputs),
- self.properties[self.DESCRIPTION],
+ workflow_input=jsonutils.dumps(inputs),
+ description=self.properties[self.DESCRIPTION],
**action_data[self.PARAMS])
LOG.debug('Mistral execution %(id)s params set to '
'%(params)s' % {'id': execution.id,
diff --git a/heat/engine/resources/openstack/mistral/workflow.py b/heat/engine/resources/openstack/mistral/workflow.py
index 4b24c1179..3c9ea23bc 100644
--- a/heat/engine/resources/openstack/mistral/workflow.py
+++ b/heat/engine/resources/openstack/mistral/workflow.py
@@ -587,7 +587,7 @@ class Workflow(signal_responder.SignalResponder,
try:
execution = self.client().executions.create(
self._workflow_name(),
- jsonutils.dumps(inputs_result),
+ workflow_input=jsonutils.dumps(inputs_result),
**params_result)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
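Both mistral call sites now pass workflow_input (and, where used, description) by keyword, which insulates them from positional-parameter changes across mistralclient releases. A hedged wrapper showing the intent (client signature assumed):

import json


def create_execution(client, workflow_name, inputs, description=None,
                     **params):
    # Keyword arguments keep the call stable even if the client
    # library reorders its positional parameters.
    return client.executions.create(
        workflow_name,
        workflow_input=json.dumps(inputs),
        description=description,
        **params)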
diff --git a/heat/engine/resources/openstack/monasca/notification.py b/heat/engine/resources/openstack/monasca/notification.py
index 511f6df27..fc6681660 100644
--- a/heat/engine/resources/openstack/monasca/notification.py
+++ b/heat/engine/resources/openstack/monasca/notification.py
@@ -140,7 +140,7 @@ class MonascaNotification(resource.Resource):
}
raise exception.StackValidationFailed(message=msg)
elif (self.properties[self.TYPE] == self.EMAIL and
- not re.match('^\S+@\S+$', address)):
+ not re.match(r'^\S+@\S+$', address)):
msg = _('Address "%(addr)s" doesn\'t satisfy the allowed format for '
'"%(email)s" type of "%(type)s" property') % {
'addr': address,
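The r'' prefix leaves the regex unchanged; it only avoids the invalid-escape-sequence DeprecationWarning that Python 3.6+ emits for '\S' in a plain string literal:

import re

# Same pattern as above: rejects any whitespace in the address.
assert re.match(r'^\S+@\S+$', 'user@example.com')
assert re.match(r'^\S+@\S+$', 'not an email') is None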
diff --git a/heat/engine/resources/openstack/neutron/rbac_policy.py b/heat/engine/resources/openstack/neutron/rbac_policy.py
index de67ecf54..451fcb0c5 100644
--- a/heat/engine/resources/openstack/neutron/rbac_policy.py
+++ b/heat/engine/resources/openstack/neutron/rbac_policy.py
@@ -13,6 +13,7 @@
from heat.common import exception
from heat.common.i18n import _
+from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
@@ -54,13 +55,17 @@ class RBACPolicy(neutron.NeutronResource):
# Change it when neutron supports more function in the future.
SUPPORTED_TYPES_ACTIONS = {
OBJECT_NETWORK: [ACCESS_AS_SHARED, ACCESS_AS_EXTERNAL],
- OBJECT_QOS_POLICY: [ACCESS_AS_SHARED]}
+ OBJECT_QOS_POLICY: [ACCESS_AS_SHARED],
+ }
properties_schema = {
OBJECT_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of the object that RBAC policy affects.'),
required=True,
+ constraints=[
+ constraints.AllowedValues(OBJECT_TYPE_KEYS)
+ ]
),
TARGET_TENANT: properties.Schema(
properties.Schema.STRING,
@@ -70,8 +75,14 @@ class RBACPolicy(neutron.NeutronResource):
),
ACTION: properties.Schema(
properties.Schema.STRING,
- _('Action for the RBAC policy.'),
+ _('Action for the RBAC policy. The allowed actions differ for '
+ 'different object types - only %(network)s objects can have an '
+ '%(external)s action.') % {'network': OBJECT_NETWORK,
+ 'external': ACCESS_AS_EXTERNAL},
required=True,
+ constraints=[
+ constraints.AllowedValues(ACTION_KEYS)
+ ]
),
OBJECT_ID: properties.Schema(
properties.Schema.STRING,
@@ -123,25 +134,26 @@ class RBACPolicy(neutron.NeutronResource):
self.client().delete_rbac_policy(self.resource_id)
def validate(self):
- """Validate the provided params."""
+ """Validate the provided properties."""
super(RBACPolicy, self).validate()
action = self.properties[self.ACTION]
obj_type = self.properties[self.OBJECT_TYPE]
# Validate obj_type and action per SUPPORTED_TYPES_ACTIONS.
- if obj_type not in self.SUPPORTED_TYPES_ACTIONS:
- msg = (_("Invalid object_type: %(obj_type)s. "
- "Valid object_type :%(value)s") %
- {'obj_type': obj_type,
- 'value': self.SUPPORTED_TYPES_ACTIONS.keys()})
- raise exception.StackValidationFailed(message=msg)
if action not in self.SUPPORTED_TYPES_ACTIONS[obj_type]:
- msg = (_("Invalid action %(action)s for object type "
- "%(obj_type)s. Valid actions :%(value)s") %
+ valid_actions = ', '.join(self.SUPPORTED_TYPES_ACTIONS[obj_type])
+ msg = (_('Invalid action "%(action)s" for object type '
+ '%(obj_type)s. Valid actions: %(valid_actions)s') %
{'action': action, 'obj_type': obj_type,
- 'value': self.SUPPORTED_TYPES_ACTIONS[obj_type]})
- raise exception.StackValidationFailed(message=msg)
+ 'valid_actions': valid_actions})
+ properties_section = self.properties.error_prefix[0]
+ path = [self.stack.t.RESOURCES, self.t.name,
+ self.stack.t.get_section_name(properties_section),
+ self.ACTION]
+ raise exception.StackValidationFailed(error='Property error',
+ path=path,
+ message=msg)
def resource_mapping():
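
Two changes work together here: `AllowedValues` now rejects unknown object types at the schema level, so the hand-rolled type check is removed, and the remaining action check reports a property-level path instead of a bare message. A condensed standalone version of the surviving validation, with the type/action table copied from above:

    SUPPORTED_TYPES_ACTIONS = {
        'network': ['access_as_shared', 'access_as_external'],
        'qos_policy': ['access_as_shared'],
    }

    def validate_rbac(obj_type, action):
        # obj_type is already guaranteed valid by AllowedValues
        valid = SUPPORTED_TYPES_ACTIONS[obj_type]
        if action not in valid:
            raise ValueError('Invalid action "%s" for object type %s. '
                             'Valid actions: %s'
                             % (action, obj_type, ', '.join(valid)))

    validate_rbac('network', 'access_as_external')       # passes
    # validate_rbac('qos_policy', 'access_as_external')  # raises
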
diff --git a/heat/engine/resources/openstack/nova/floatingip.py b/heat/engine/resources/openstack/nova/floatingip.py
index c89eca120..789bb7662 100644
--- a/heat/engine/resources/openstack/nova/floatingip.py
+++ b/heat/engine/resources/openstack/nova/floatingip.py
@@ -109,7 +109,6 @@ class NovaFloatingIp(resource.Resource):
def handle_delete(self):
with self.client_plugin('neutron').ignore_not_found:
self.neutron().delete_floatingip(self.resource_id)
- return True
def _resolve_attribute(self, key):
if self.resource_id is None:
@@ -167,49 +166,29 @@ class NovaFloatingIpAssociation(resource.Resource):
return self.physical_resource_name_or_FnGetRefId()
def handle_create(self):
- server = self.client().servers.get(self.properties[self.SERVER])
- fl_ip = self.neutron().show_floatingip(
- self.properties[self.FLOATING_IP])
-
- ip_address = fl_ip['floatingip']['floating_ip_address']
- self.client().servers.add_floating_ip(server, ip_address)
+ self.client_plugin().associate_floatingip(
+ self.properties[self.SERVER], self.properties[self.FLOATING_IP])
self.resource_id_set(self.id)
def handle_delete(self):
if self.resource_id is None:
return
-
- try:
- server = self.client().servers.get(self.properties[self.SERVER])
- if server:
- fl_ip = self.neutron().show_floatingip(
- self.properties[self.FLOATING_IP])
- ip_address = fl_ip['floatingip']['floating_ip_address']
- self.client().servers.remove_floating_ip(server, ip_address)
- except Exception as e:
- if not (self.client_plugin().is_not_found(e)
- or self.client_plugin().is_conflict(e)
- or self.client_plugin('neutron').is_not_found(e)):
- raise
+ with self.client_plugin().ignore_not_found:
+ self.client_plugin().dissociate_floatingip(
+ self.properties[self.FLOATING_IP])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
# If floating_ip in prop_diff, we need to remove the old floating
# ip from the old server, and then to add the new floating ip
# to the old/new(if the server_id is changed) server.
- # If prop_diff only has the server_id, no need to remove the
- # floating ip from the old server, nova does this automatically
- # when calling add_floating_ip().
if self.FLOATING_IP in prop_diff:
self.handle_delete()
server_id = (prop_diff.get(self.SERVER) or
self.properties[self.SERVER])
fl_ip_id = (prop_diff.get(self.FLOATING_IP) or
self.properties[self.FLOATING_IP])
- server = self.client().servers.get(server_id)
- fl_ip = self.neutron().show_floatingip(fl_ip_id)
- ip_address = fl_ip['floatingip']['floating_ip_address']
- self.client().servers.add_floating_ip(server, ip_address)
+ self.client_plugin().associate_floatingip(server_id, fl_ip_id)
self.resource_id_set(self.id)
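
The association logic moves into the nova client plugin, which performs it through neutron now that the nova-network proxy calls (`add_floating_ip`/`remove_floating_ip`) are gone. The plugin internals are not shown in this diff; a rough sketch of what a neutron-side association amounts to, as an assumption rather than a copy of the plugin code:

    # Assumed shape of the operation: point the floating IP at one of the
    # server's ports. Illustrative only - not the heat plugin source.
    def associate_floatingip(neutron, server_id, floatingip_id):
        ports = neutron.list_ports(device_id=server_id)['ports']
        if not ports:
            raise RuntimeError('server %s has no ports' % server_id)
        neutron.update_floatingip(
            floatingip_id, {'floatingip': {'port_id': ports[0]['id']}})

    def dissociate_floatingip(neutron, floatingip_id):
        neutron.update_floatingip(
            floatingip_id, {'floatingip': {'port_id': None}})
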
diff --git a/heat/engine/resources/openstack/nova/server.py b/heat/engine/resources/openstack/nova/server.py
index 036d19243..26867b27f 100644
--- a/heat/engine/resources/openstack/nova/server.py
+++ b/heat/engine/resources/openstack/nova/server.py
@@ -117,6 +117,9 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
'allocate_network', 'tag',
)
+ _IFACE_MANAGED_KEYS = (NETWORK_PORT, NETWORK_ID,
+ NETWORK_FIXED_IP, NETWORK_SUBNET)
+
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
diff --git a/heat/engine/resources/openstack/nova/server_network_mixin.py b/heat/engine/resources/openstack/nova/server_network_mixin.py
index 160f39cf9..010d77389 100644
--- a/heat/engine/resources/openstack/nova/server_network_mixin.py
+++ b/heat/engine/resources/openstack/nova/server_network_mixin.py
@@ -267,17 +267,52 @@ class ServerNetworkMixin(object):
self.client('neutron').update_floatingip(
floating_ip, {'floatingip': {'port_id': None}})
- def _exclude_not_updated_networks(self, old_nets, new_nets):
- # make networks similar by adding None vlues for not used keys
+ def _find_best_match(self, existing_interfaces, specified_net):
+ specified_net_items = set(specified_net.items())
+ if specified_net.get(self.NETWORK_PORT) is not None:
+ for iface in existing_interfaces:
+ if (iface[self.NETWORK_PORT] ==
+ specified_net[self.NETWORK_PORT] and
+ specified_net_items.issubset(set(iface.items()))):
+ return iface
+ elif specified_net.get(self.NETWORK_FIXED_IP) is not None:
+ for iface in existing_interfaces:
+ if (iface[self.NETWORK_FIXED_IP] ==
+ specified_net[self.NETWORK_FIXED_IP] and
+ specified_net_items.issubset(set(iface.items()))):
+ return iface
+ else:
+ # Best subset intersection
+ best, matches, num = None, 0, 0
+ for iface in existing_interfaces:
+ iface_items = set(iface.items())
+ if specified_net_items.issubset(iface_items):
+ num = len(specified_net_items.intersection(iface_items))
+ if num > matches:
+ best, matches = iface, num
+ return best
+
+ def _exclude_not_updated_networks(self, old_nets, new_nets, interfaces):
+ not_updated_nets = []
+
+ # Update old_nets to match interfaces
+ self.update_networks_matching_iface_port(old_nets, interfaces)
+ # make networks similar by adding None values for unused keys
for key in self._NETWORK_KEYS:
# if _net.get(key) is '', convert to None
for _net in itertools.chain(new_nets, old_nets):
_net[key] = _net.get(key) or None
- # find matches and remove them from old and new networks
- not_updated_nets = [net for net in old_nets if net in new_nets]
- for net in not_updated_nets:
- old_nets.remove(net)
- new_nets.remove(net)
+
+ for new_net in list(new_nets):
+ new_net_reduced = {k: v for k, v in new_net.items()
+ if k not in self._IFACE_MANAGED_KEYS or
+ v is not None}
+ match = self._find_best_match(old_nets, new_net_reduced)
+ if match is not None:
+ not_updated_nets.append(match)
+ new_nets.remove(new_net)
+ old_nets.remove(match)
+
return not_updated_nets
def _get_network_id(self, net):
@@ -288,39 +323,31 @@ class ServerNetworkMixin(object):
'neutron').network_id_from_subnet_id(subnet)
return net_id
- def update_networks_matching_iface_port(self, nets, interfaces):
-
- def find_equal(port, net_id, ip, nets):
- for net in nets:
- if (net.get('port') == port or
- (net.get('fixed_ip') == ip and
- self._get_network_id(net) == net_id)):
- return net
+ def update_networks_matching_iface_port(self, old_nets, interfaces):
- def find_poor_net(net_id, nets):
- for net in nets:
- if (not net.get('port') and not net.get('fixed_ip') and
- self._get_network_id(net) == net_id):
- return net
-
- for iface in interfaces:
- # get interface properties
+ def get_iface_props(iface):
ipaddr = None
+ subnet = None
if len(iface.fixed_ips) > 0:
ipaddr = iface.fixed_ips[0]['ip_address']
- props = {'port': iface.port_id,
- 'net_id': iface.net_id,
- 'ip': ipaddr,
- 'nets': nets}
- # try to match by port or network_id with fixed_ip
- net = find_equal(**props)
- if net is not None:
- net['port'] = props['port']
- continue
- # find poor net that has only network_id
- net = find_poor_net(props['net_id'], nets)
- if net is not None:
- net['port'] = props['port']
+ subnet = iface.fixed_ips[0]['subnet_id']
+ return {self.NETWORK_PORT: iface.port_id,
+ self.NETWORK_ID: iface.net_id,
+ self.NETWORK_FIXED_IP: ipaddr,
+ self.NETWORK_SUBNET: subnet}
+
+ interfaces_net_props = [get_iface_props(iface) for iface in interfaces]
+ for old_net in old_nets:
+ if old_net[self.NETWORK_PORT] is None:
+ old_net[self.NETWORK_ID] = self._get_network_id(old_net)
+ old_net_reduced = {k: v for k, v in old_net.items()
+ if k in self._IFACE_MANAGED_KEYS and
+ v is not None}
+ match = self._find_best_match(interfaces_net_props,
+ old_net_reduced)
+ if match is not None:
+ old_net.update(match)
+ interfaces_net_props.remove(match)
def _get_available_networks(self):
# first we get the private networks owned by the tenant
@@ -402,11 +429,7 @@ class ServerNetworkMixin(object):
# remove not updated networks from old and new networks lists,
# also get the list of these networks
not_updated_nets = self._exclude_not_updated_networks(
- old_nets,
- new_nets)
-
- self.update_networks_matching_iface_port(
- old_nets + not_updated_nets, ifaces)
+ old_nets, new_nets, ifaces)
# according to nova interface-detach command detached port
# will be deleted
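
The effect of `_find_best_match` is that a template entry matches an existing interface when every key the entry actually specifies agrees with that interface, with a port match taking priority over a fixed-IP match. A condensed rendition over plain dicts (the real method also normalises None values and reduced key sets first):

    def find_best_match(existing_interfaces, specified_net):
        spec = set(specified_net.items())
        if specified_net.get('port') is not None:
            candidates = [i for i in existing_interfaces
                          if i['port'] == specified_net['port'] and
                          spec.issubset(set(i.items()))]
        elif specified_net.get('fixed_ip') is not None:
            candidates = [i for i in existing_interfaces
                          if i['fixed_ip'] == specified_net['fixed_ip'] and
                          spec.issubset(set(i.items()))]
        else:
            candidates = [i for i in existing_interfaces
                          if spec.issubset(set(i.items()))]
        return candidates[0] if candidates else None

    ifaces = [
        {'port': 'p1', 'network': 'net-a', 'fixed_ip': '10.0.0.5',
         'subnet': 's1'},
        {'port': 'p2', 'network': 'net-b', 'fixed_ip': '10.0.1.9',
         'subnet': 's2'},
    ]
    # An entry that names only the network still finds its interface,
    # so it is treated as unchanged rather than as an update.
    assert find_best_match(ifaces, {'network': 'net-b'}) is ifaces[1]
    assert find_best_match(ifaces, {'fixed_ip': '10.0.0.5'}) is ifaces[0]
    assert find_best_match(ifaces, {'port': 'p3'}) is None
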
diff --git a/contrib/rackspace/heat_keystoneclient_v2/__init__.py b/heat/engine/resources/openstack/octavia/__init__.py
index e69de29bb..e69de29bb 100644
--- a/contrib/rackspace/heat_keystoneclient_v2/__init__.py
+++ b/heat/engine/resources/openstack/octavia/__init__.py
diff --git a/heat/engine/resources/openstack/octavia/health_monitor.py b/heat/engine/resources/openstack/octavia/health_monitor.py
new file mode 100644
index 000000000..626658a7b
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/health_monitor.py
@@ -0,0 +1,170 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class HealthMonitor(octavia_base.OctaviaBase):
+ """A resource to handle load balancer health monitors.
+
+ This resource creates and manages octavia health monitors,
+ which watch the status of the load-balanced servers.
+ """
+
+ # Properties inputs for the resources create/update.
+ PROPERTIES = (
+ ADMIN_STATE_UP, DELAY, EXPECTED_CODES, HTTP_METHOD,
+ MAX_RETRIES, POOL, TIMEOUT, TYPE, URL_PATH, TENANT_ID
+ ) = (
+ 'admin_state_up', 'delay', 'expected_codes', 'http_method',
+ 'max_retries', 'pool', 'timeout', 'type', 'url_path', 'tenant_id'
+ )
+
+ # Supported HTTP methods
+ HTTP_METHODS = (
+ GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS,
+ CONNECT, PATCH
+ ) = (
+ 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS',
+ 'CONNECT', 'PATCH'
+ )
+
+ # Supported output attributes of the resources.
+ ATTRIBUTES = (POOLS_ATTR,) = ('pools',)
+
+ properties_schema = {
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of the health monitor.'),
+ default=True,
+ update_allowed=True
+ ),
+ DELAY: properties.Schema(
+ properties.Schema.INTEGER,
+ _('The minimum time in milliseconds between regular connections '
+ 'of the member.'),
+ required=True,
+ update_allowed=True,
+ constraints=[constraints.Range(min=0)]
+ ),
+ EXPECTED_CODES: properties.Schema(
+ properties.Schema.STRING,
+ _('The HTTP status codes expected in response from the '
+ 'member to declare it healthy. Specify one of the following '
+ 'values: a single value, such as 200; a list, such as 200, 202; '
+ 'or a range, such as 200-204.'),
+ update_allowed=True,
+ default='200'
+ ),
+ HTTP_METHOD: properties.Schema(
+ properties.Schema.STRING,
+ _('The HTTP method used for requests by the monitor of type '
+ 'HTTP.'),
+ update_allowed=True,
+ default=GET,
+ constraints=[constraints.AllowedValues(HTTP_METHODS)]
+ ),
+ MAX_RETRIES: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Number of permissible connection failures before changing the '
+ 'member status to INACTIVE.'),
+ required=True,
+ update_allowed=True,
+ constraints=[constraints.Range(min=1, max=10)],
+ ),
+ POOL: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of the load balancing pool.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('octavia.pool')
+ ]
+ ),
+ TIMEOUT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Maximum number of milliseconds for a monitor to wait for a '
+ 'connection to be established before it times out.'),
+ required=True,
+ update_allowed=True,
+ constraints=[constraints.Range(min=0)]
+ ),
+ TYPE: properties.Schema(
+ properties.Schema.STRING,
+ _('One of the predefined health monitor types.'),
+ required=True,
+ constraints=[
+ constraints.AllowedValues(['PING', 'TCP', 'HTTP', 'HTTPS']),
+ ]
+ ),
+ URL_PATH: properties.Schema(
+ properties.Schema.STRING,
+ _('The HTTP path used in the HTTP request by the monitor to '
+ 'test the health of a member. A valid value is a string that '
+ 'begins with a forward slash (/).'),
+ update_allowed=True,
+ default='/'
+ ),
+ TENANT_ID: properties.Schema(
+ properties.Schema.STRING,
+ _('ID of the tenant who owns the health monitor.')
+ )
+ }
+
+ attributes_schema = {
+ POOLS_ATTR: attributes.Schema(
+ _('The list of Pools related to this monitor.'),
+ type=attributes.Schema.LIST
+ )
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.POOL],
+ client_plugin=self.client_plugin(),
+ finder='get_pool',
+ ),
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items() if v is not None)
+ if self.POOL in props:
+ props['pool_id'] = props.pop(self.POOL)
+ return props
+
+ def _resource_create(self, properties):
+ return self.client().health_monitor_create(
+ json={'healthmonitor': properties})['healthmonitor']
+
+ def _resource_update(self, prop_diff):
+ self.client().health_monitor_set(
+ self.resource_id, json={'healthmonitor': prop_diff})
+
+ def _resource_delete(self):
+ self.client().health_monitor_delete(self.resource_id)
+
+ def _show_resource(self):
+ return self.client().health_monitor_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::HealthMonitor': HealthMonitor,
+ }
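
Each of the new octavia resources funnels its properties through `_prepare_args`, which drops unset values and renames reference properties into the `*_id` form the octavia API body expects. The health monitor variant, exercised standalone with the property names from the schema above:

    def prepare_args(properties):
        props = {k: v for k, v in properties.items() if v is not None}
        if 'pool' in props:
            props['pool_id'] = props.pop('pool')
        return props

    args = prepare_args({'pool': 'pool-uuid', 'type': 'HTTP', 'delay': 3,
                         'max_retries': 5, 'timeout': 10,
                         'tenant_id': None})
    assert args == {'pool_id': 'pool-uuid', 'type': 'HTTP', 'delay': 3,
                    'max_retries': 5, 'timeout': 10}
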
diff --git a/heat/engine/resources/openstack/octavia/l7policy.py b/heat/engine/resources/openstack/octavia/l7policy.py
new file mode 100644
index 000000000..889b7d257
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/l7policy.py
@@ -0,0 +1,205 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class L7Policy(octavia_base.OctaviaBase):
+ """A resource for managing octavia L7Policies.
+
+ This resource manages L7Policies, which represent a collection of L7Rules.
+ L7Policy holds the action that should be performed when the rules are
+ matched (Redirect to Pool, Redirect to URL, Reject). L7Policy holds a
+ Listener id, so a Listener can evaluate a collection of L7Policies.
+ L7Policy will return True when all of the L7Rules that belong
+ to this L7Policy are matched. L7Policies under a specific Listener are
+ ordered and the first L7Policy that returns a match will be executed.
+ When none of the policies match, the request gets forwarded to
+ listener.default_pool_id.
+ """
+
+ PROPERTIES = (
+ NAME, DESCRIPTION, ADMIN_STATE_UP, ACTION,
+ REDIRECT_POOL, REDIRECT_URL, POSITION, LISTENER
+ ) = (
+ 'name', 'description', 'admin_state_up', 'action',
+ 'redirect_pool', 'redirect_url', 'position', 'listener'
+ )
+
+ L7ACTIONS = (
+ REJECT, REDIRECT_TO_POOL, REDIRECT_TO_URL
+ ) = (
+ 'REJECT', 'REDIRECT_TO_POOL', 'REDIRECT_TO_URL'
+ )
+
+ ATTRIBUTES = (RULES_ATTR,) = ('rules',)
+
+ properties_schema = {
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of the policy.'),
+ update_allowed=True
+ ),
+ DESCRIPTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Description of the policy.'),
+ update_allowed=True
+ ),
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of the policy.'),
+ default=True,
+ update_allowed=True
+ ),
+ ACTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Action type of the policy.'),
+ required=True,
+ constraints=[constraints.AllowedValues(L7ACTIONS)],
+ update_allowed=True
+ ),
+ REDIRECT_POOL: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of the pool for REDIRECT_TO_POOL action type.'),
+ constraints=[
+ constraints.CustomConstraint('octavia.pool')
+ ],
+ update_allowed=True
+ ),
+ REDIRECT_URL: properties.Schema(
+ properties.Schema.STRING,
+ _('URL for REDIRECT_TO_URL action type. '
+ 'This should be a valid URL string.'),
+ update_allowed=True
+ ),
+ POSITION: properties.Schema(
+ properties.Schema.NUMBER,
+ _('L7 policy position in the ordered policies list. This must be '
+ 'an integer starting from 1. If not specified, the policy will be '
+ 'placed at the tail of the existing policies list.'),
+ constraints=[constraints.Range(min=1)],
+ update_allowed=True
+ ),
+ LISTENER: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of the listener this policy belongs to.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('octavia.listener')
+ ]
+ ),
+ }
+
+ attributes_schema = {
+ RULES_ATTR: attributes.Schema(
+ _('L7Rules associated with this policy.'),
+ type=attributes.Schema.LIST
+ ),
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.LISTENER],
+ client_plugin=self.client_plugin(),
+ finder='get_listener',
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.REDIRECT_POOL],
+ client_plugin=self.client_plugin(),
+ finder='get_pool',
+ ),
+ ]
+
+ def validate(self):
+ super(L7Policy, self).validate()
+ if (self.properties[self.ACTION] == self.REJECT and
+ (self.properties[self.REDIRECT_POOL] is not None or
+ self.properties[self.REDIRECT_URL] is not None)):
+ msg = (_('Properties %(pool)s and %(url)s must not be specified '
+ 'when %(action)s type is set to %(action_type)s.') %
+ {'pool': self.REDIRECT_POOL,
+ 'url': self.REDIRECT_URL,
+ 'action': self.ACTION,
+ 'action_type': self.REJECT})
+ raise exception.StackValidationFailed(message=msg)
+
+ if self.properties[self.ACTION] == self.REDIRECT_TO_POOL:
+ if self.properties[self.REDIRECT_URL] is not None:
+ raise exception.ResourcePropertyValueDependency(
+ prop1=self.REDIRECT_URL,
+ prop2=self.ACTION,
+ value=self.REDIRECT_TO_URL)
+ if self.properties[self.REDIRECT_POOL] is None:
+ msg = (_('Property %(pool)s is required when %(action)s '
+ 'type is set to %(action_type)s.') %
+ {'pool': self.REDIRECT_POOL,
+ 'action': self.ACTION,
+ 'action_type': self.REDIRECT_TO_POOL})
+ raise exception.StackValidationFailed(message=msg)
+
+ if self.properties[self.ACTION] == self.REDIRECT_TO_URL:
+ if self.properties[self.REDIRECT_POOL] is not None:
+ raise exception.ResourcePropertyValueDependency(
+ prop1=self.REDIRECT_POOL,
+ prop2=self.ACTION,
+ value=self.REDIRECT_TO_POOL)
+ if self.properties[self.REDIRECT_URL] is None:
+ msg = (_('Property %(url)s is required when %(action)s '
+ 'type is set to %(action_type)s.') %
+ {'url': self.REDIRECT_URL,
+ 'action': self.ACTION,
+ 'action_type': self.REDIRECT_TO_URL})
+ raise exception.StackValidationFailed(message=msg)
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items()
+ if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ props['listener_id'] = props.pop(self.LISTENER)
+ if self.REDIRECT_POOL in props:
+ props['redirect_pool_id'] = props.pop(self.REDIRECT_POOL)
+ return props
+
+ def _resource_create(self, properties):
+ return self.client().l7policy_create(
+ json={'l7policy': properties})['l7policy']
+
+ def _resource_update(self, prop_diff):
+ if self.REDIRECT_POOL in prop_diff:
+ prop_diff['redirect_pool_id'] = prop_diff.pop(self.REDIRECT_POOL)
+ self.client().l7policy_set(
+ self.resource_id, json={'l7policy': prop_diff})
+
+ def _resource_delete(self):
+ self.client().l7policy_delete(self.resource_id)
+
+ def _show_resource(self):
+ return self.client().l7policy_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::L7Policy': L7Policy
+ }
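
The `validate` method above enforces a small matrix: REJECT forbids both redirect properties, REDIRECT_TO_POOL requires the pool and forbids the URL, and REDIRECT_TO_URL the inverse. The same rules as a compact standalone check:

    def validate_l7policy(action, redirect_pool=None, redirect_url=None):
        if action == 'REJECT' and (redirect_pool or redirect_url):
            raise ValueError('REJECT accepts neither redirect_pool '
                             'nor redirect_url')
        if action == 'REDIRECT_TO_POOL' and redirect_url:
            raise ValueError('redirect_url conflicts with REDIRECT_TO_POOL')
        if action == 'REDIRECT_TO_POOL' and not redirect_pool:
            raise ValueError('redirect_pool is required')
        if action == 'REDIRECT_TO_URL' and redirect_pool:
            raise ValueError('redirect_pool conflicts with REDIRECT_TO_URL')
        if action == 'REDIRECT_TO_URL' and not redirect_url:
            raise ValueError('redirect_url is required')

    validate_l7policy('REDIRECT_TO_URL', redirect_url='http://example.com')
    validate_l7policy('REJECT')
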
diff --git a/heat/engine/resources/openstack/octavia/l7rule.py b/heat/engine/resources/openstack/octavia/l7rule.py
new file mode 100644
index 000000000..f1455359e
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/l7rule.py
@@ -0,0 +1,148 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class L7Rule(octavia_base.OctaviaBase):
+ """A resource for managing octavia L7Rules.
+
+ This resource manages L7Rules, which represent a set of attributes
+ that defines which part of the request should be matched and how
+ it should be matched.
+ """
+
+ PROPERTIES = (
+ ADMIN_STATE_UP, L7POLICY, TYPE, COMPARE_TYPE,
+ INVERT, KEY, VALUE
+ ) = (
+ 'admin_state_up', 'l7policy', 'type', 'compare_type',
+ 'invert', 'key', 'value'
+ )
+
+ L7RULE_TYPES = (
+ HOST_NAME, PATH, FILE_TYPE, HEADER, COOKIE
+ ) = (
+ 'HOST_NAME', 'PATH', 'FILE_TYPE', 'HEADER', 'COOKIE'
+ )
+
+ L7COMPARE_TYPES = (
+ REGEX, STARTS_WITH, ENDS_WITH, CONTAINS, EQUAL_TO
+ ) = (
+ 'REGEX', 'STARTS_WITH', 'ENDS_WITH', 'CONTAINS', 'EQUAL_TO'
+ )
+
+ properties_schema = {
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of the rule.'),
+ default=True,
+ update_allowed=True
+ ),
+ L7POLICY: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of L7 policy this rule belongs to.'),
+ constraints=[
+ constraints.CustomConstraint('octavia.l7policy')
+ ],
+ required=True
+ ),
+ TYPE: properties.Schema(
+ properties.Schema.STRING,
+ _('Rule type.'),
+ constraints=[constraints.AllowedValues(L7RULE_TYPES)],
+ update_allowed=True,
+ required=True
+ ),
+ COMPARE_TYPE: properties.Schema(
+ properties.Schema.STRING,
+ _('Rule compare type.'),
+ constraints=[constraints.AllowedValues(L7COMPARE_TYPES)],
+ update_allowed=True,
+ required=True
+ ),
+ INVERT: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Invert the compare type.'),
+ default=False,
+ update_allowed=True
+ ),
+ KEY: properties.Schema(
+ properties.Schema.STRING,
+ _('Key to compare. Relevant for HEADER and COOKIE types only.'),
+ update_allowed=True
+ ),
+ VALUE: properties.Schema(
+ properties.Schema.STRING,
+ _('Value to compare.'),
+ update_allowed=True,
+ required=True
+ )
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.L7POLICY],
+ client_plugin=self.client_plugin(),
+ finder='get_l7policy',
+ )
+ ]
+
+ def validate(self):
+ super(L7Rule, self).validate()
+ if (self.properties[self.TYPE] in (self.HEADER, self.COOKIE) and
+ self.properties[self.KEY] is None):
+ msg = (_('Property %(key)s is missing. '
+ 'This property should be specified for '
+ 'rules of %(header)s and %(cookie)s types.') %
+ {'key': self.KEY,
+ 'header': self.HEADER,
+ 'cookie': self.COOKIE})
+ raise exception.StackValidationFailed(message=msg)
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items()
+ if v is not None)
+ props.pop(self.L7POLICY)
+ return props
+
+ def _resource_create(self, properties):
+ return self.client().l7rule_create(self.properties[self.L7POLICY],
+ json={'rule': properties})['rule']
+
+ def _resource_update(self, prop_diff):
+ self.client().l7rule_set(self.resource_id,
+ self.properties[self.L7POLICY],
+ json={'rule': prop_diff})
+
+ def _resource_delete(self):
+ self.client().l7rule_delete(self.resource_id,
+ self.properties[self.L7POLICY])
+
+ def _show_resource(self):
+ return self.client().l7rule_show(self.resource_id,
+ self.properties[self.L7POLICY])
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::L7Rule': L7Rule
+ }
diff --git a/heat/engine/resources/openstack/octavia/listener.py b/heat/engine/resources/openstack/octavia/listener.py
new file mode 100644
index 000000000..ad2ee82b3
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/listener.py
@@ -0,0 +1,203 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class Listener(octavia_base.OctaviaBase):
+ """A resource for managing octavia Listeners.
+
+ This resource creates and manages octavia Listeners,
+ which represent a listening endpoint for the VIP.
+ """
+
+ PROPERTIES = (
+ PROTOCOL_PORT, PROTOCOL, LOADBALANCER, DEFAULT_POOL, NAME,
+ ADMIN_STATE_UP, DESCRIPTION, DEFAULT_TLS_CONTAINER_REF,
+ SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID
+ ) = (
+ 'protocol_port', 'protocol', 'loadbalancer', 'default_pool', 'name',
+ 'admin_state_up', 'description', 'default_tls_container_ref',
+ 'sni_container_refs', 'connection_limit', 'tenant_id'
+ )
+
+ SUPPORTED_PROTOCOLS = (TCP, HTTP, HTTPS, TERMINATED_HTTPS, PROXY) = (
+ 'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS', 'PROXY')
+
+ ATTRIBUTES = (
+ LOADBALANCERS_ATTR, DEFAULT_POOL_ID_ATTR
+ ) = (
+ 'loadbalancers', 'default_pool_id'
+ )
+
+ properties_schema = {
+ PROTOCOL_PORT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('TCP or UDP port on which to listen for client traffic.'),
+ required=True,
+ constraints=[
+ constraints.Range(1, 65535),
+ ]
+ ),
+ PROTOCOL: properties.Schema(
+ properties.Schema.STRING,
+ _('Protocol on which to listen for the client traffic.'),
+ required=True,
+ constraints=[
+ constraints.AllowedValues(SUPPORTED_PROTOCOLS),
+ ]
+ ),
+ LOADBALANCER: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of the load balancer with which the listener '
+ 'is associated.'),
+ constraints=[
+ constraints.CustomConstraint('octavia.loadbalancer')
+ ]
+ ),
+ DEFAULT_POOL: properties.Schema(
+ properties.Schema.STRING,
+ _('ID or name of the default pool for the listener.'),
+ update_allowed=True,
+ constraints=[
+ constraints.CustomConstraint('octavia.pool')
+ ],
+ ),
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of this listener.'),
+ update_allowed=True
+ ),
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of this listener.'),
+ update_allowed=True,
+ default=True
+ ),
+ DESCRIPTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Description of this listener.'),
+ update_allowed=True,
+ default=''
+ ),
+ DEFAULT_TLS_CONTAINER_REF: properties.Schema(
+ properties.Schema.STRING,
+ _('Default TLS container reference to retrieve TLS '
+ 'information.'),
+ update_allowed=True
+ ),
+ SNI_CONTAINER_REFS: properties.Schema(
+ properties.Schema.LIST,
+ _('List of TLS container references for SNI.'),
+ update_allowed=True
+ ),
+ CONNECTION_LIMIT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('The maximum number of connections permitted for this '
+ 'listener. Defaults to -1, which is infinite.'),
+ update_allowed=True,
+ default=-1,
+ constraints=[
+ constraints.Range(min=-1),
+ ]
+ ),
+ TENANT_ID: properties.Schema(
+ properties.Schema.STRING,
+ _('The ID of the tenant who owns the listener.')
+ ),
+ }
+
+ attributes_schema = {
+ LOADBALANCERS_ATTR: attributes.Schema(
+ _('IDs of the load balancers this listener is associated with.'),
+ type=attributes.Schema.LIST
+ ),
+ DEFAULT_POOL_ID_ATTR: attributes.Schema(
+ _('ID of the default pool this listener is associated with.'),
+ type=attributes.Schema.STRING
+ )
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.LOADBALANCER],
+ client_plugin=self.client_plugin(),
+ finder='get_loadbalancer',
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.DEFAULT_POOL],
+ client_plugin=self.client_plugin(),
+ finder='get_pool'
+ ),
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items() if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ if self.LOADBALANCER in props:
+ props['loadbalancer_id'] = props.pop(self.LOADBALANCER)
+ if self.DEFAULT_POOL in props:
+ props['default_pool_id'] = props.pop(self.DEFAULT_POOL)
+ return props
+
+ def validate(self):
+ super(Listener, self).validate()
+ if (self.properties[self.LOADBALANCER] is None
+ and self.properties[self.DEFAULT_POOL] is None):
+ raise exception.PropertyUnspecifiedError(self.LOADBALANCER,
+ self.DEFAULT_POOL)
+
+ if self.properties[self.PROTOCOL] == self.TERMINATED_HTTPS:
+ if self.properties[self.DEFAULT_TLS_CONTAINER_REF] is None:
+ msg = (_('Property %(ref)s is required when protocol is '
+ '%(term)s.') % {'ref': self.DEFAULT_TLS_CONTAINER_REF,
+ 'term': self.TERMINATED_HTTPS})
+ raise exception.StackValidationFailed(message=msg)
+
+ def _resource_create(self, properties):
+ return self.client().listener_create(
+ json={'listener': properties})['listener']
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ self._update_called = False
+ if self.DEFAULT_POOL in prop_diff:
+ prop_diff['default_pool_id'] = prop_diff.pop(self.DEFAULT_POOL)
+ return prop_diff
+
+ def _resource_update(self, prop_diff):
+ self.client().listener_set(self.resource_id,
+ json={'listener': prop_diff})
+
+ def _resource_delete(self):
+ self.client().listener_delete(self.resource_id)
+
+ def _show_resource(self):
+ return self.client().listener_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::Listener': Listener,
+ }
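
Listener validation carries two cross-property rules: at least one of `loadbalancer` and `default_pool` must be set, and `TERMINATED_HTTPS` is only usable with a TLS container reference. Reduced to a standalone check:

    def validate_listener(protocol, loadbalancer=None, default_pool=None,
                          default_tls_container_ref=None):
        if loadbalancer is None and default_pool is None:
            raise ValueError('one of loadbalancer or default_pool '
                             'must be specified')
        if (protocol == 'TERMINATED_HTTPS' and
                default_tls_container_ref is None):
            raise ValueError('default_tls_container_ref is required '
                             'for TERMINATED_HTTPS')

    validate_listener('HTTP', loadbalancer='lb-uuid')
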
diff --git a/heat/engine/resources/openstack/octavia/loadbalancer.py b/heat/engine/resources/openstack/octavia/loadbalancer.py
new file mode 100644
index 000000000..c4d477f7e
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/loadbalancer.py
@@ -0,0 +1,163 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class LoadBalancer(octavia_base.OctaviaBase):
+ """A resource for creating octavia Load Balancers.
+
+ This resource creates and manages octavia Load Balancers,
+ which allow traffic to be directed between servers.
+ """
+
+ PROPERTIES = (
+ DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
+ ADMIN_STATE_UP, TENANT_ID
+ ) = (
+ 'description', 'name', 'provider', 'vip_address', 'vip_subnet',
+ 'admin_state_up', 'tenant_id'
+ )
+
+ ATTRIBUTES = (
+ VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR, POOLS_ATTR
+ ) = (
+ 'vip_address', 'vip_port_id', 'vip_subnet_id', 'pools'
+ )
+
+ properties_schema = {
+ DESCRIPTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Description of this Load Balancer.'),
+ update_allowed=True,
+ default=''
+ ),
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of this Load Balancer.'),
+ update_allowed=True
+ ),
+ PROVIDER: properties.Schema(
+ properties.Schema.STRING,
+ _('Provider for this Load Balancer.'),
+ ),
+ VIP_ADDRESS: properties.Schema(
+ properties.Schema.STRING,
+ _('IP address for the VIP.'),
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ],
+ ),
+ VIP_SUBNET: properties.Schema(
+ properties.Schema.STRING,
+ _('The name or ID of the subnet on which to allocate the VIP '
+ 'address.'),
+ constraints=[
+ constraints.CustomConstraint('neutron.subnet')
+ ],
+ required=True
+ ),
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of this Load Balancer.'),
+ default=True,
+ update_allowed=True
+ ),
+ TENANT_ID: properties.Schema(
+ properties.Schema.STRING,
+ _('The ID of the tenant who owns the Load Balancer. Only '
+ 'administrative users can specify a tenant ID other than '
+ 'their own.'),
+ constraints=[
+ constraints.CustomConstraint('keystone.project')
+ ],
+ )
+ }
+
+ attributes_schema = {
+ VIP_ADDRESS_ATTR: attributes.Schema(
+ _('The VIP address of the LoadBalancer.'),
+ type=attributes.Schema.STRING
+ ),
+ VIP_PORT_ATTR: attributes.Schema(
+ _('The VIP port of the LoadBalancer.'),
+ type=attributes.Schema.STRING
+ ),
+ VIP_SUBNET_ATTR: attributes.Schema(
+ _('The VIP subnet of the LoadBalancer.'),
+ type=attributes.Schema.STRING
+ ),
+ POOLS_ATTR: attributes.Schema(
+ _('Pools this LoadBalancer is associated with.'),
+ type=attributes.Schema.LIST,
+ ),
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.VIP_SUBNET],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='subnet'
+ ),
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items()
+ if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ props['vip_subnet_id'] = props.pop(self.VIP_SUBNET)
+ return props
+
+ def handle_create(self):
+ properties = self._prepare_args(self.properties)
+ lb = self.client().load_balancer_create(
+ json={'loadbalancer': properties})['loadbalancer']
+ self.resource_id_set(lb['id'])
+
+ def check_create_complete(self, data):
+ return self._check_status()
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ self.client().load_balancer_set(
+ self.resource_id,
+ json={'loadbalancer': prop_diff})
+ return prop_diff
+
+ def check_update_complete(self, prop_diff):
+ if prop_diff:
+ return self._check_status()
+ return True
+
+ def _resource_delete(self):
+ self.client().load_balancer_delete(self.resource_id)
+
+ def _show_resource(self):
+ return self.client().load_balancer_show(
+ self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::LoadBalancer': LoadBalancer
+ }
diff --git a/heat/engine/resources/openstack/octavia/octavia_base.py b/heat/engine/resources/openstack/octavia/octavia_base.py
new file mode 100644
index 000000000..7cb110411
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/octavia_base.py
@@ -0,0 +1,95 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.engine import resource
+from heat.engine import support
+
+
+class OctaviaBase(resource.Resource):
+
+ default_client_name = 'octavia'
+
+ support_status = support.SupportStatus(version='10.0.0')
+
+ def _check_status(self, expected_status='ACTIVE'):
+ res = self._show_resource()
+ status = res['provisioning_status']
+ if status == 'ERROR':
+ raise exception.ResourceInError(resource_status=status)
+ return status == expected_status
+
+ def _check_deleted(self):
+ with self.client_plugin().ignore_not_found:
+ return self._check_status('DELETED')
+ return True
+
+ def _resolve_attribute(self, name):
+ if self.resource_id is None:
+ return
+ attributes = self._show_resource()
+ return attributes[name]
+
+ def handle_create(self):
+ return self._prepare_args(self.properties)
+
+ def check_create_complete(self, properties):
+ if self.resource_id is None:
+ try:
+ res = self._resource_create(properties)
+ self.resource_id_set(res['id'])
+ except Exception as ex:
+ if self.client_plugin().is_conflict(ex):
+ return False
+ raise
+
+ return self._check_status()
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ self._update_called = False
+ return prop_diff
+
+ def check_update_complete(self, prop_diff):
+ if not prop_diff:
+ return True
+
+ if not self._update_called:
+ try:
+ self._resource_update(prop_diff)
+ self._update_called = True
+ except Exception as ex:
+ if self.client_plugin().is_conflict(ex):
+ return False
+ raise
+
+ return self._check_status()
+
+ def handle_delete(self):
+ self._delete_called = False
+
+ def check_delete_complete(self, data):
+ if self.resource_id is None:
+ return True
+
+ if not self._delete_called:
+ try:
+ self._resource_delete()
+ self._delete_called = True
+ except Exception as ex:
+ if self.client_plugin().is_conflict(ex):
+ return self._check_status('DELETED')
+ elif self.client_plugin().is_not_found(ex):
+ return True
+ raise
+
+ return self._check_deleted()
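
This base class is the heart of the new resources: every create, update and delete becomes a poll loop in which the API call itself is retried from `check_*_complete` while octavia reports a conflict (the load balancer is still immutable from a previous operation), and success is then judged from `provisioning_status`. A stripped-down model of the delete path, assuming the engine keeps calling `check_delete_complete` until it returns True:

    class Conflict(Exception):
        pass  # stands in for a 409 from the octavia API

    class FakeOctavia(object):
        def __init__(self):
            self.delete_calls = 0
            self.status = 'ACTIVE'

        def delete(self):
            self.delete_calls += 1
            if self.delete_calls == 1:
                raise Conflict()          # busy with a previous operation
            self.status = 'PENDING_DELETE'

    class DeleteTask(object):
        def __init__(self, client):
            self.client = client
            self._delete_called = False

        def check_delete_complete(self):
            if not self._delete_called:
                try:
                    self.client.delete()
                    self._delete_called = True
                except Conflict:
                    return False          # retry the call on the next poll
            if self.client.status == 'PENDING_DELETE':
                self.client.status = 'DELETED'    # simulate progress
                return False
            return self.client.status == 'DELETED'

    task = DeleteTask(FakeOctavia())
    assert task.check_delete_complete() is False   # 409: call retried later
    assert task.check_delete_complete() is False   # deletion in progress
    assert task.check_delete_complete() is True    # gone
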
diff --git a/heat/engine/resources/openstack/octavia/pool.py b/heat/engine/resources/openstack/octavia/pool.py
new file mode 100644
index 000000000..81737c23b
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/pool.py
@@ -0,0 +1,221 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class Pool(octavia_base.OctaviaBase):
+ """A resource for managing Octavia Pools.
+
+ This resource manages octavia LBaaS Pools, which represent a group
+ of nodes. Pools define the subnet where the nodes reside, the
+ balancing algorithm, and the nodes themselves.
+ """
+
+ PROPERTIES = (
+ ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
+ LB_ALGORITHM, LISTENER, LOADBALANCER, PROTOCOL,
+ SESSION_PERSISTENCE_TYPE, SESSION_PERSISTENCE_COOKIE_NAME,
+ ) = (
+ 'admin_state_up', 'description', 'session_persistence', 'name',
+ 'lb_algorithm', 'listener', 'loadbalancer', 'protocol',
+ 'type', 'cookie_name'
+ )
+
+ SESSION_PERSISTENCE_TYPES = (
+ SOURCE_IP, HTTP_COOKIE, APP_COOKIE
+ ) = (
+ 'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'
+ )
+
+ SUPPORTED_PROTOCOLS = (TCP, HTTP, HTTPS, TERMINATED_HTTPS, PROXY) = (
+ 'TCP', 'HTTP', 'HTTPS', 'TERMINATED_HTTPS', 'PROXY')
+
+ ATTRIBUTES = (
+ HEALTHMONITOR_ID_ATTR, LISTENERS_ATTR, MEMBERS_ATTR
+ ) = (
+ 'healthmonitor_id', 'listeners', 'members'
+ )
+
+ properties_schema = {
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of this pool.'),
+ default=True,
+ update_allowed=True
+ ),
+ DESCRIPTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Description of this pool.'),
+ update_allowed=True,
+ default=''
+ ),
+ SESSION_PERSISTENCE: properties.Schema(
+ properties.Schema.MAP,
+ _('Configuration of session persistence.'),
+ schema={
+ SESSION_PERSISTENCE_TYPE: properties.Schema(
+ properties.Schema.STRING,
+ _('Method of implementing the session '
+ 'persistence feature.'),
+ required=True,
+ constraints=[constraints.AllowedValues(
+ SESSION_PERSISTENCE_TYPES
+ )]
+ ),
+ SESSION_PERSISTENCE_COOKIE_NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of the cookie, '
+ 'required if type is APP_COOKIE.')
+ )
+ },
+ ),
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of this pool.'),
+ update_allowed=True
+ ),
+ LB_ALGORITHM: properties.Schema(
+ properties.Schema.STRING,
+ _('The algorithm used to distribute load between the members of '
+ 'the pool.'),
+ required=True,
+ constraints=[
+ constraints.AllowedValues(['ROUND_ROBIN',
+ 'LEAST_CONNECTIONS', 'SOURCE_IP']),
+ ],
+ update_allowed=True,
+ ),
+ LISTENER: properties.Schema(
+ properties.Schema.STRING,
+ _('Listener name or ID to be associated with this pool.'),
+ constraints=[
+ constraints.CustomConstraint('octavia.listener')
+ ]
+ ),
+ LOADBALANCER: properties.Schema(
+ properties.Schema.STRING,
+ _('Loadbalancer name or ID to be associated with this pool.'),
+ constraints=[
+ constraints.CustomConstraint('octavia.loadbalancer')
+ ],
+ ),
+ PROTOCOL: properties.Schema(
+ properties.Schema.STRING,
+ _('Protocol of the pool.'),
+ required=True,
+ constraints=[
+ constraints.AllowedValues(SUPPORTED_PROTOCOLS),
+ ]
+ ),
+ }
+
+ attributes_schema = {
+ HEALTHMONITOR_ID_ATTR: attributes.Schema(
+ _('ID of the health monitor associated with this pool.'),
+ type=attributes.Schema.STRING
+ ),
+ LISTENERS_ATTR: attributes.Schema(
+ _('Listener associated with this pool.'),
+ type=attributes.Schema.STRING
+ ),
+ MEMBERS_ATTR: attributes.Schema(
+ _('Members associated with this pool.'),
+ cache_mode=attributes.Schema.CACHE_NONE,
+ type=attributes.Schema.LIST
+ ),
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.LISTENER],
+ client_plugin=self.client_plugin(),
+ finder='get_listener',
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.LOADBALANCER],
+ client_plugin=self.client_plugin(),
+ finder='get_loadbalancer',
+ ),
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items() if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ if self.LISTENER in props:
+ props['listener_id'] = props.pop(self.LISTENER)
+ if self.LOADBALANCER in props:
+ props['loadbalancer_id'] = props.pop(self.LOADBALANCER)
+ session_p = props.get(self.SESSION_PERSISTENCE)
+ if session_p is not None:
+ session_props = dict(
+ (k, v) for k, v in session_p.items() if v is not None)
+ props[self.SESSION_PERSISTENCE] = session_props
+ return props
+
+ def validate(self):
+ super(Pool, self).validate()
+ if (self.properties[self.LISTENER] is None and
+ self.properties[self.LOADBALANCER] is None):
+ raise exception.PropertyUnspecifiedError(self.LISTENER,
+ self.LOADBALANCER)
+
+ if self.properties[self.SESSION_PERSISTENCE] is not None:
+ session_p = self.properties[self.SESSION_PERSISTENCE]
+ persistence_type = session_p[self.SESSION_PERSISTENCE_TYPE]
+ if persistence_type == self.APP_COOKIE:
+ if not session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
+ msg = (_('Property %(cookie)s is required when %(sp)s '
+ 'type is set to %(app)s.') %
+ {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
+ 'sp': self.SESSION_PERSISTENCE,
+ 'app': self.APP_COOKIE})
+ raise exception.StackValidationFailed(message=msg)
+ elif persistence_type == self.SOURCE_IP:
+ if session_p.get(self.SESSION_PERSISTENCE_COOKIE_NAME):
+ msg = (_('Property %(cookie)s must NOT be specified when '
+ '%(sp)s type is set to %(ip)s.') %
+ {'cookie': self.SESSION_PERSISTENCE_COOKIE_NAME,
+ 'sp': self.SESSION_PERSISTENCE,
+ 'ip': self.SOURCE_IP})
+ raise exception.StackValidationFailed(message=msg)
+
+ def _resource_create(self, properties):
+ return self.client().pool_create(json={'pool': properties})['pool']
+
+ def _resource_update(self, prop_diff):
+ self.client().pool_set(self.resource_id, json={'pool': prop_diff})
+
+ def _resource_delete(self):
+ self.client().pool_delete(self.resource_id)
+
+ def _show_resource(self):
+ return self.client().pool_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::Pool': Pool,
+ }
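
The session persistence checks mirror the API rules: `APP_COOKIE` needs a `cookie_name`, while `SOURCE_IP` must not carry one (no rule is enforced for `HTTP_COOKIE`). As a standalone function:

    def validate_session_persistence(session_p):
        if session_p is None:
            return
        if (session_p['type'] == 'APP_COOKIE' and
                not session_p.get('cookie_name')):
            raise ValueError('cookie_name is required for APP_COOKIE')
        if (session_p['type'] == 'SOURCE_IP' and
                session_p.get('cookie_name')):
            raise ValueError('cookie_name must not be set for SOURCE_IP')

    validate_session_persistence({'type': 'APP_COOKIE',
                                  'cookie_name': 'JSESSIONID'})
    validate_session_persistence({'type': 'SOURCE_IP'})
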
diff --git a/heat/engine/resources/openstack/octavia/pool_member.py b/heat/engine/resources/openstack/octavia/pool_member.py
new file mode 100644
index 000000000..e4d29e8e0
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/pool_member.py
@@ -0,0 +1,153 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import translation
+
+
+class PoolMember(octavia_base.OctaviaBase):
+ """A resource for managing Octavia Pool Members.
+
+ A pool member represents a single backend node.
+ """
+
+ PROPERTIES = (
+ POOL, ADDRESS, PROTOCOL_PORT, MONITOR_ADDRESS, MONITOR_PORT,
+ WEIGHT, ADMIN_STATE_UP, SUBNET,
+ ) = (
+ 'pool', 'address', 'protocol_port', 'monitor_address', 'monitor_port',
+ 'weight', 'admin_state_up', 'subnet'
+ )
+
+ ATTRIBUTES = (
+ ADDRESS_ATTR, POOL_ID_ATTR
+ ) = (
+ 'address', 'pool_id'
+ )
+
+ properties_schema = {
+ POOL: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or ID of the load balancing pool.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('octavia.pool')
+ ]
+ ),
+ ADDRESS: properties.Schema(
+ properties.Schema.STRING,
+ _('IP address of the pool member on the pool network.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ]
+ ),
+ PROTOCOL_PORT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Port on which the pool member listens for requests or '
+ 'connections.'),
+ required=True,
+ constraints=[
+ constraints.Range(1, 65535),
+ ]
+ ),
+ MONITOR_ADDRESS: properties.Schema(
+ properties.Schema.STRING,
+ _('Alternate IP address which the health monitor can use for '
+ 'health checks.'),
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ]
+ ),
+ MONITOR_PORT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Alternate port which the health monitor can use for health checks.'),
+ constraints=[
+ constraints.Range(1, 65535),
+ ]
+ ),
+ WEIGHT: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Weight of pool member in the pool (defaults to 1).'),
+ default=1,
+ constraints=[
+ constraints.Range(0, 256),
+ ],
+ update_allowed=True
+ ),
+ ADMIN_STATE_UP: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('The administrative state of the pool member.'),
+ default=True,
+ update_allowed=True
+ ),
+ SUBNET: properties.Schema(
+ properties.Schema.STRING,
+ _('Subnet name or ID of this member.'),
+ constraints=[
+ constraints.CustomConstraint('neutron.subnet')
+ ],
+ ),
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.SUBNET],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='subnet'
+ ),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.POOL],
+ client_plugin=self.client_plugin(),
+ finder='get_pool'
+ ),
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items() if v is not None)
+ props.pop(self.POOL)
+ if self.SUBNET in props:
+ props['subnet_id'] = props.pop(self.SUBNET)
+ return props
+
+ def _resource_create(self, properties):
+ return self.client().member_create(
+ self.properties[self.POOL], json={'member': properties})['member']
+
+ def _resource_update(self, prop_diff):
+ self.client().member_set(self.properties[self.POOL],
+ self.resource_id,
+ json={'member': prop_diff})
+
+ def _resource_delete(self):
+ self.client().member_delete(self.properties[self.POOL],
+ self.resource_id)
+
+ def _show_resource(self):
+ return self.client().member_show(
+ self.properties[self.POOL], self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::PoolMember': PoolMember,
+ }
diff --git a/heat/engine/resources/openstack/sahara/cluster.py b/heat/engine/resources/openstack/sahara/cluster.py
index 2a71909be..b5e2166e0 100644
--- a/heat/engine/resources/openstack/sahara/cluster.py
+++ b/heat/engine/resources/openstack/sahara/cluster.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
+
from oslo_log import log as logging
from heat.common import exception
@@ -26,6 +28,15 @@ from heat.engine import translation
LOG = logging.getLogger(__name__)
+# NOTE(jfreud, pshchelo): copied from sahara/utils/api_validator.py
+SAHARA_NAME_REGEX = (r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]"
+ r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
+ r"[A-Za-z0-9\-]*[A-Za-z0-9])$")
+
+# NOTE(jfreud): we do not use physical_resource_name_limit attr because we
+# prefer to truncate _after_ removing invalid characters
+SAHARA_CLUSTER_NAME_MAX_LENGTH = 80
+
class SaharaCluster(resource.Resource):
"""A resource for managing Sahara clusters.
@@ -69,6 +80,10 @@ class SaharaCluster(resource.Resource):
NAME: properties.Schema(
properties.Schema.STRING,
_('Hadoop cluster name.'),
+ constraints=[
+ constraints.Length(min=1, max=SAHARA_CLUSTER_NAME_MAX_LENGTH),
+ constraints.AllowedPattern(SAHARA_NAME_REGEX),
+ ],
),
PLUGIN_NAME: properties.Schema(
properties.Schema.STRING,
@@ -208,7 +223,9 @@ class SaharaCluster(resource.Resource):
name = self.properties[self.NAME]
if name:
return name
- return self.physical_resource_name()
+ return self.reduce_physical_resource_name(
+ re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name()),
+ SAHARA_CLUSTER_NAME_MAX_LENGTH)
def handle_create(self):
plugin_name = self.properties[self.PLUGIN_NAME]
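
The derived default name first strips every character sahara's name validator rejects and only then truncates, so none of the 80-character budget is spent on characters that would be removed anyway. A quick check of the intended behaviour (the real code uses `reduce_physical_resource_name`, which also preserves the trailing short ID; the plain slice below is a simplification):

    import re

    SAHARA_CLUSTER_NAME_MAX_LENGTH = 80

    def default_cluster_name(physical_resource_name):
        cleaned = re.sub('[^a-zA-Z0-9-]', '', physical_resource_name)
        return cleaned[:SAHARA_CLUSTER_NAME_MAX_LENGTH]

    # Heat physical resource names contain underscores, which the sahara
    # name regex rejects; they are removed before the length check.
    name = default_cluster_name('stack_name-cluster_' + 'a' * 100)
    assert '_' not in name
    assert len(name) <= SAHARA_CLUSTER_NAME_MAX_LENGTH
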
diff --git a/heat/engine/resources/openstack/sahara/job_binary.py b/heat/engine/resources/openstack/sahara/job_binary.py
index f59cbf805..85cf705f2 100644
--- a/heat/engine/resources/openstack/sahara/job_binary.py
+++ b/heat/engine/resources/openstack/sahara/job_binary.py
@@ -18,6 +18,7 @@ from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
from heat.engine import resource
+from heat.engine import rsrc_defn
from heat.engine import support
@@ -105,8 +106,8 @@ class JobBinary(resource.Resource):
and uuidutils.is_uuid_like(url[len("internal-db://"):]))):
msg = _("%s is not a valid job location.") % url
raise exception.StackValidationFailed(
- path=[self.stack.t.get_section_name('resources'), self.name,
- self.stack.t.get_section_name('properties')],
+ path=[self.stack.t.RESOURCES, self.name,
+ self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
message=msg)
def handle_create(self):
diff --git a/heat/engine/resources/openstack/sahara/templates.py b/heat/engine/resources/openstack/sahara/templates.py
index 78a9eb1c2..eb4bc738f 100644
--- a/heat/engine/resources/openstack/sahara/templates.py
+++ b/heat/engine/resources/openstack/sahara/templates.py
@@ -24,6 +24,7 @@ from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
+from heat.engine import rsrc_defn
from heat.engine import support
from heat.engine import translation
@@ -346,9 +347,9 @@ class SaharaNodeGroupTemplate(resource.Resource):
'unsupported': ', '.join(unsupported_processes),
'allowed': ', '.join(allowed_processes)})
raise exception.StackValidationFailed(
- path=[self.stack.t.get_section_name('resources'),
+ path=[self.stack.t.RESOURCES,
self.name,
- self.stack.t.get_section_name('properties')],
+ self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
message=msg)
def parse_live_resource_data(self, resource_properties, resource_data):
diff --git a/heat/engine/resources/openstack/trove/cluster.py b/heat/engine/resources/openstack/trove/cluster.py
index 261b4a2cf..ced52fe30 100644
--- a/heat/engine/resources/openstack/trove/cluster.py
+++ b/heat/engine/resources/openstack/trove/cluster.py
@@ -20,6 +20,7 @@ from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
+from heat.engine import translation
LOG = logging.getLogger(__name__)
@@ -62,9 +63,15 @@ class TroveCluster(resource.Resource):
)
_INSTANCE_KEYS = (
- FLAVOR, VOLUME_SIZE,
+ FLAVOR, VOLUME_SIZE, NETWORKS,
) = (
- 'flavor', 'volume_size',
+ 'flavor', 'volume_size', 'networks',
+ )
+
+ _NICS_KEYS = (
+ NET, PORT, V4_FIXED_IP
+ ) = (
+ 'network', 'port', 'fixed_ip'
)
ATTRIBUTES = (
@@ -121,10 +128,50 @@ class TroveCluster(resource.Resource):
constraints=[
constraints.Range(1, 150),
]
- )
+ ),
+ NETWORKS: properties.Schema(
+ properties.Schema.LIST,
+ _("List of network interfaces to create on instance."),
+ support_status=support.SupportStatus(version='10.0.0'),
+ default=[],
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ NET: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or UUID of the network to attach '
+ 'this NIC to. Either %(port)s or '
+ '%(net)s must be specified.') % {
+ 'port': PORT, 'net': NET},
+ constraints=[
+ constraints.CustomConstraint(
+ 'neutron.network')
+ ]
+ ),
+ PORT: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or UUID of Neutron port to '
+ 'attach this NIC to. Either %(port)s '
+ 'or %(net)s must be specified.')
+ % {'port': PORT, 'net': NET},
+ constraints=[
+ constraints.CustomConstraint(
+ 'neutron.port')
+ ],
+ ),
+ V4_FIXED_IP: properties.Schema(
+ properties.Schema.STRING,
+ _('Fixed IPv4 address for this NIC.'),
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ]
+ ),
+ },
+ ),
+ ),
}
)
- )
+ ),
}
attributes_schema = {
@@ -142,6 +189,30 @@ class TroveCluster(resource.Resource):
entity = 'clusters'
+ def translation_rules(self, properties):
+ return [
+ translation.TranslationRule(
+ properties,
+ translation.TranslationRule.RESOLVE,
+ translation_path=[self.INSTANCES, self.NETWORKS, self.NET],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='network'),
+ translation.TranslationRule(
+ properties,
+ translation.TranslationRule.RESOLVE,
+ translation_path=[self.INSTANCES, self.NETWORKS, self.PORT],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='port'),
+ translation.TranslationRule(
+ properties,
+ translation.TranslationRule.RESOLVE,
+ translation_path=[self.INSTANCES, self.FLAVOR],
+ client_plugin=self.client_plugin(),
+ finder='find_flavor_by_name_or_id'),
+ ]
+
def _cluster_name(self):
return self.properties[self.NAME] or self.physical_resource_name()
@@ -152,11 +223,14 @@ class TroveCluster(resource.Resource):
# convert instances to format required by troveclient
instances = []
for instance in self.properties[self.INSTANCES]:
- instances.append({
- 'flavorRef': self.client_plugin().find_flavor_by_name_or_id(
- instance[self.FLAVOR]),
- 'volume': {'size': instance[self.VOLUME_SIZE]}
- })
+ instance_dict = {
+ 'flavorRef': instance[self.FLAVOR],
+ 'volume': {'size': instance[self.VOLUME_SIZE]},
+ }
+ instance_nics = self.get_instance_nics(instance)
+ if instance_nics:
+ instance_dict["nics"] = instance_nics
+ instances.append(instance_dict)
args = {
'name': self._cluster_name(),
@@ -168,6 +242,21 @@ class TroveCluster(resource.Resource):
self.resource_id_set(cluster.id)
return cluster.id
+ def get_instance_nics(self, instance):
+ nics = []
+ for nic in instance[self.NETWORKS]:
+ nic_dict = {}
+ if nic.get(self.NET):
+ nic_dict['net-id'] = nic.get(self.NET)
+ if nic.get(self.PORT):
+ nic_dict['port-id'] = nic.get(self.PORT)
+ ip = nic.get(self.V4_FIXED_IP)
+ if ip:
+ nic_dict['v4-fixed-ip'] = ip
+ nics.append(nic_dict)
+
+ return nics
+
def _refresh_cluster(self, cluster_id):
try:
cluster = self.client().clusters.get(cluster_id)
@@ -256,6 +345,24 @@ class TroveCluster(resource.Resource):
datastore_type, datastore_version,
self.DATASTORE_TYPE, self.DATASTORE_VERSION)
+ # check validity of instances' NETWORKS
+ is_neutron = self.is_using_neutron()
+ for instance in self.properties[self.INSTANCES]:
+ for nic in instance[self.NETWORKS]:
+ # 'nic.get(self.PORT) is not None' covers two cases:
+ # 1. a port value was set directly in the template
+ # 2. 'get_resource' references a port created in this stack
+ if not is_neutron and nic.get(self.PORT) is not None:
+ msg = (_("Can not use %s property on Nova-network.")
+ % self.PORT)
+ raise exception.StackValidationFailed(message=msg)
+
+ if (bool(nic.get(self.NET) is not None) ==
+ bool(nic.get(self.PORT) is not None)):
+ msg = (_("Either %(net)s or %(port)s must be provided.")
+ % {'net': self.NET, 'port': self.PORT})
+ raise exception.StackValidationFailed(message=msg)
+
def _resolve_attribute(self, name):
if self.resource_id is None:
return
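
Editor's note: pulled out of the Resource machinery, the NETWORKS validation above reduces to an exclusive-or check on plain dicts; a sketch under that simplification:

    def validate_nic(nic, is_neutron=True):
        # Ports only exist in Neutron; reject them on Nova-network.
        if not is_neutron and nic.get('port') is not None:
            raise ValueError('Cannot use port property with Nova-network.')
        # Both set, or both unset, is invalid: exactly one is required.
        if (nic.get('network') is not None) == (nic.get('port') is not None):
            raise ValueError('Either network or port must be provided.')

    validate_nic({'network': 'private'})   # passes
    # validate_nic({})                              # raises: neither provided
    # validate_nic({'network': 'n', 'port': 'p'})   # raises: both provided
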
diff --git a/heat/engine/resources/openstack/zun/container.py b/heat/engine/resources/openstack/zun/container.py
index 7c333bfcf..56a312c24 100644
--- a/heat/engine/resources/openstack/zun/container.py
+++ b/heat/engine/resources/openstack/zun/container.py
@@ -33,11 +33,19 @@ class Container(resource.Resource):
PROPERTIES = (
NAME, IMAGE, COMMAND, CPU, MEMORY,
ENVIRONMENT, WORKDIR, LABELS, IMAGE_PULL_POLICY,
- RESTART_POLICY, INTERACTIVE, IMAGE_DRIVER
+ RESTART_POLICY, INTERACTIVE, IMAGE_DRIVER, HINTS,
+ HOSTNAME, SECURITY_GROUPS, MOUNTS,
) = (
'name', 'image', 'command', 'cpu', 'memory',
'environment', 'workdir', 'labels', 'image_pull_policy',
- 'restart_policy', 'interactive', 'image_driver'
+ 'restart_policy', 'interactive', 'image_driver', 'hints',
+ 'hostname', 'security_groups', 'mounts',
+ )
+
+ _MOUNT_KEYS = (
+ VOLUME_ID, MOUNT_PATH, VOLUME_SIZE
+ ) = (
+ 'volume_id', 'mount_path', 'volume_size',
)
ATTRIBUTES = (
@@ -110,6 +118,48 @@ class Container(resource.Resource):
constraints.AllowedValues(['docker', 'glance']),
]
),
+ HINTS: properties.Schema(
+ properties.Schema.MAP,
+ _('Arbitrary key-value pairs for the scheduler to select a host.'),
+ support_status=support.SupportStatus(version='10.0.0'),
+ ),
+ HOSTNAME: properties.Schema(
+ properties.Schema.STRING,
+ _('The hostname of the container.'),
+ support_status=support.SupportStatus(version='10.0.0'),
+ ),
+ SECURITY_GROUPS: properties.Schema(
+ properties.Schema.LIST,
+ _('List of security group names or IDs.'),
+ support_status=support.SupportStatus(version='10.0.0'),
+ default=[]
+ ),
+ MOUNTS: properties.Schema(
+ properties.Schema.LIST,
+ _('A list of volumes mounted inside the container.'),
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ VOLUME_ID: properties.Schema(
+ properties.Schema.STRING,
+ _('The ID or name of the cinder volume to '
+ 'mount to the container.'),
+ constraints=[
+ constraints.CustomConstraint('cinder.volume')
+ ]
+ ),
+ VOLUME_SIZE: properties.Schema(
+ properties.Schema.INTEGER,
+ _('The size of the cinder volume to create.'),
+ ),
+ MOUNT_PATH: properties.Schema(
+ properties.Schema.STRING,
+ _('The filesystem path inside the container.'),
+ required=True,
+ ),
+ },
+ )
+ ),
}
attributes_schema = {
@@ -142,12 +192,38 @@ class Container(resource.Resource):
'"unless-stopped".') % policy
raise exception.StackValidationFailed(message=msg)
+ mounts = self.properties[self.MOUNTS] or []
+ for mount in mounts:
+ self._validate_mount(mount)
+
+ def _validate_mount(self, mount):
+ volume_id = mount.get(self.VOLUME_ID)
+ volume_size = mount.get(self.VOLUME_SIZE)
+
+ if volume_id is None and volume_size is None:
+ msg = _('One of the properties "%(id)s" or "%(size)s" '
+ 'should be set for the specified mount of '
+ 'container "%(container)s".'
+ '') % dict(id=self.VOLUME_ID,
+ size=self.VOLUME_SIZE,
+ container=self.name)
+ raise exception.StackValidationFailed(message=msg)
+
+ # Don't allow specifying volume_id and volume_size at the same time
+ if volume_id and volume_size:
+ raise exception.ResourcePropertyConflict(
+ "/".join([self.NETWORKS, self.VOLUME_ID]),
+ "/".join([self.NETWORKS, self.VOLUME_SIZE]))
+
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
policy = args.pop(self.RESTART_POLICY, None)
if policy:
args[self.RESTART_POLICY] = self._parse_restart_policy(policy)
+ mounts = args.pop(self.MOUNTS, None)
+ if mounts:
+ args[self.MOUNTS] = self._build_mounts(mounts)
container = self.client().containers.run(**args)
self.resource_id_set(container.uuid)
return container.uuid
@@ -165,6 +241,17 @@ class Container(resource.Resource):
return restart_policy
+ def _build_mounts(self, mounts):
+ mnts = []
+ for mount in mounts:
+ mnt_info = {'destination': mount[self.MOUNT_PATH]}
+ if mount.get(self.VOLUME_ID):
+ mnt_info['source'] = mount[self.VOLUME_ID]
+ if mount.get(self.VOLUME_SIZE):
+ mnt_info['size'] = mount[self.VOLUME_SIZE]
+ mnts.append(mnt_info)
+ return mnts
+
def check_create_complete(self, id):
container = self.client().containers.get(id)
if container.status in ('Creating', 'Created'):
@@ -202,9 +289,20 @@ class Container(resource.Resource):
if not self.resource_id:
return
try:
- self.client().containers.delete(self.resource_id, force=True)
+ self.client().containers.delete(self.resource_id, stop=True)
+ return self.resource_id
+ except Exception as exc:
+ self.client_plugin().ignore_not_found(exc)
+
+ def check_delete_complete(self, id):
+ if not id:
+ return True
+ try:
+ self.client().containers.get(id)
except Exception as exc:
self.client_plugin().ignore_not_found(exc)
+ return True
+ return False
def _resolve_attribute(self, name):
if self.resource_id is None:
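
Editor's note: for reference, the mount translation in _build_mounts() boils down to the following mapping (a sketch with plain dicts rather than Heat properties); 'source' selects an existing Cinder volume while 'size' asks Zun to create one:

    def build_mounts(mounts):
        mnts = []
        for mount in mounts:
            mnt_info = {'destination': mount['mount_path']}
            if mount.get('volume_id'):      # mount an existing volume
                mnt_info['source'] = mount['volume_id']
            if mount.get('volume_size'):    # or have a new one created
                mnt_info['size'] = mount['volume_size']
            mnts.append(mnt_info)
        return mnts

    print(build_mounts([{'mount_path': '/data', 'volume_size': 1}]))
    # [{'destination': '/data', 'size': 1}]
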
diff --git a/heat/engine/resources/stack_resource.py b/heat/engine/resources/stack_resource.py
index 097074142..a29ab8eca 100644
--- a/heat/engine/resources/stack_resource.py
+++ b/heat/engine/resources/stack_resource.py
@@ -29,6 +29,7 @@ from heat.engine import environment
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack as parser
+from heat.engine import stk_defn
from heat.engine import template
from heat.objects import raw_template
from heat.objects import stack as stack_object
@@ -198,6 +199,24 @@ class StackResource(resource.Resource):
return self.nested().preview_resources()
+ def get_nested_parameters_stack(self):
+ """Return a stack for schema validation.
+
+ This returns a stack to be introspected for building the parameters
+ schema. Subclasses can customize it to return a restricted version of
+ what will actually be running.
+ """
+ try:
+ child_template = self.child_template()
+ params = self.child_params()
+ except NotImplementedError:
+ class_name = reflection.get_class_name(self, fully_qualified=False)
+ LOG.warning("Nested parameters of '%s' not yet "
+ "implemented", class_name)
+ return
+ name = "%s-%s" % (self.stack.name, self.name)
+ return self._parse_nested_stack(name, child_template, params)
+
def _parse_child_template(self, child_template, child_env):
parsed_child_template = child_template
if isinstance(parsed_child_template, template.Template):
@@ -221,6 +240,7 @@ class StackResource(resource.Resource):
parsed_template = self._child_parsed_template(child_template,
child_env)
+ self._validate_nested_resources(parsed_template)
# Note we disable rollback for nested stacks, since they
# should be rolled back by the parent stack on failure
@@ -247,7 +267,6 @@ class StackResource(resource.Resource):
def _child_parsed_template(self, child_template, child_env):
parsed_template = self._parse_child_template(child_template, child_env)
- self._validate_nested_resources(parsed_template)
# Don't overwrite the attributes_schema for subclasses that
# define their own attributes_schema.
@@ -259,12 +278,16 @@ class StackResource(resource.Resource):
def _validate_nested_resources(self, templ):
if cfg.CONF.max_resources_per_stack == -1:
return
+
total_resources = (len(templ[templ.RESOURCES]) +
self.stack.total_resources(self.root_stack_id))
- if self.nested():
- # It's an update and these resources will be deleted
- total_resources -= len(self.nested().resources)
+ identity = self.nested_identifier()
+ if identity is not None:
+ existing = self.rpc_client().list_stack_resources(self.context,
+ identity)
+ # Don't double-count existing resources during an update
+ total_resources -= len(existing)
if (total_resources > cfg.CONF.max_resources_per_stack):
message = exception.StackResourceLimitExceeded.msg_fmt
@@ -316,12 +339,15 @@ class StackResource(resource.Resource):
self.resource_id_set(result['stack_id'])
- def _stack_kwargs(self, user_params, child_template, adopt_data=None):
-
+ def child_definition(self, child_template=None, user_params=None,
+ nested_identifier=None):
if user_params is None:
user_params = self.child_params()
if child_template is None:
child_template = self.child_template()
+ if nested_identifier is None:
+ nested_identifier = self.nested_identifier()
+
child_env = environment.get_child_environment(
self.stack.env,
user_params,
@@ -330,6 +356,14 @@ class StackResource(resource.Resource):
parsed_template = self._child_parsed_template(child_template,
child_env)
+ return stk_defn.StackDefinition(self.context, parsed_template,
+ nested_identifier,
+ None)
+
+ def _stack_kwargs(self, user_params, child_template, adopt_data=None):
+ defn = self.child_definition(child_template, user_params)
+ parsed_template = defn.t
+
if adopt_data is None:
template_id = parsed_template.store(self.context)
return {
@@ -341,7 +375,7 @@ class StackResource(resource.Resource):
else:
return {
'template': parsed_template.t,
- 'params': child_env.user_env_as_dict(),
+ 'params': defn.env.user_env_as_dict(),
'files': parsed_template.files,
}
@@ -594,10 +628,7 @@ class StackResource(resource.Resource):
def get_output(self, op):
"""Return the specified Output value from the nested stack.
- If the output key does not exist, raise an InvalidTemplateAttribute
- exception. (Note that TemplateResource.get_attribute() relies on this
- particular exception, not KeyError, being raised if the key does not
- exist.)
+ If the output key does not exist, raise a NotFound exception.
"""
if (self._outputs is None or
(op in self._outputs and
@@ -614,8 +645,8 @@ class StackResource(resource.Resource):
self._outputs = {o[rpc_api.OUTPUT_KEY]: o for o in outputs}
if op not in self._outputs:
- raise exception.InvalidTemplateAttribute(resource=self.name,
- key=op)
+ raise exception.NotFound(_('Specified output key %s not '
+ 'found.') % op)
output_data = self._outputs[op]
if rpc_api.OUTPUT_ERROR in output_data:
@@ -627,4 +658,8 @@ class StackResource(resource.Resource):
return output_data[rpc_api.OUTPUT_VALUE]
def _resolve_attribute(self, name):
- return self.get_output(name)
+ try:
+ return self.get_output(name)
+ except exception.NotFound:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=name)
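
Editor's note: the last hunk changes the error contract: get_output() now raises NotFound, and only _resolve_attribute() translates that back into InvalidTemplateAttribute for template-facing callers. A toy sketch of the same layering (stand-in exception classes, not the heat.common.exception ones):

    class NotFound(Exception):
        pass

    class InvalidTemplateAttribute(Exception):
        pass

    def get_output(outputs, op):
        if op not in outputs:
            raise NotFound('Specified output key %s not found.' % op)
        return outputs[op]

    def resolve_attribute(outputs, name):
        # Template attribute lookups keep their historical error type.
        try:
            return get_output(outputs, name)
        except NotFound:
            raise InvalidTemplateAttribute(name)
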
diff --git a/heat/engine/resources/template_resource.py b/heat/engine/resources/template_resource.py
index 60e665b82..7abf6d287 100644
--- a/heat/engine/resources/template_resource.py
+++ b/heat/engine/resources/template_resource.py
@@ -306,27 +306,32 @@ class TemplateResource(stack_resource.StackResource):
if self.resource_id is None:
return six.text_type(self.name)
+ if STACK_ID_OUTPUT in self.attributes.cached_attrs:
+ return self.attributes.cached_attrs[STACK_ID_OUTPUT]
+
stack_identity = self.nested_identifier()
+ reference_id = stack_identity.arn()
+
try:
if self._outputs is not None:
- return self.get_output(STACK_ID_OUTPUT)
-
- output = self.rpc_client().show_output(self.context,
- dict(stack_identity),
- STACK_ID_OUTPUT)
- if rpc_api.OUTPUT_ERROR in output:
- raise exception.TemplateOutputError(
- resource=self.name,
- attribute=STACK_ID_OUTPUT,
- message=output[rpc_api.OUTPUT_ERROR])
+ reference_id = self.get_output(STACK_ID_OUTPUT)
+ elif STACK_ID_OUTPUT in self.attributes:
+ output = self.rpc_client().show_output(self.context,
+ dict(stack_identity),
+ STACK_ID_OUTPUT)
+ if rpc_api.OUTPUT_ERROR in output:
+ raise exception.TemplateOutputError(
+ resource=self.name,
+ attribute=STACK_ID_OUTPUT,
+ message=output[rpc_api.OUTPUT_ERROR])
+ reference_id = output[rpc_api.OUTPUT_VALUE]
except exception.TemplateOutputError as err:
LOG.info('%s', err)
- except (exception.InvalidTemplateAttribute, exception.NotFound):
+ except exception.NotFound:
pass
- else:
- return output[rpc_api.OUTPUT_VALUE]
- return stack_identity.arn()
+ self.attributes.set_cached_attr(STACK_ID_OUTPUT, reference_id)
+ return reference_id
def get_attribute(self, key, *path):
if self.resource_id is None:
@@ -337,4 +342,9 @@ class TemplateResource(stack_resource.StackResource):
return grouputils.get_nested_attrs(self, key, False, *path)
# then look for normal outputs
- return attributes.select_from_attribute(self.get_output(key), path)
+ try:
+ return attributes.select_from_attribute(self.get_output(key),
+ path)
+ except exception.NotFound:
+ raise exception.InvalidTemplateAttribute(resource=self.name,
+ key=key)
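
Editor's note: the get_reference_id() hunk above adds a read-through cache; its shape, separated from Heat (a plain dict standing in for attributes.cached_attrs, and a fixed ARN as the fallback):

    STACK_ID_OUTPUT = 'OS::stack_id'

    def get_reference_id(resource):
        cache = resource.setdefault('cached_attrs', {})
        if STACK_ID_OUTPUT in cache:       # fast path, no RPC round-trip
            return cache[STACK_ID_OUTPUT]
        # Fall back to the stack ARN when the output is absent.
        ref = resource.get('outputs', {}).get(STACK_ID_OUTPUT,
                                              resource['arn'])
        cache[STACK_ID_OUTPUT] = ref
        return ref

    r = {'arn': 'arn:openstack:heat::demo:stacks/s/1', 'outputs': {}}
    print(get_reference_id(r))   # computed once...
    print(get_reference_id(r))   # ...served from the cache afterwards
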
diff --git a/heat/engine/resources/wait_condition.py b/heat/engine/resources/wait_condition.py
index a7d68f0fb..87d797237 100644
--- a/heat/engine/resources/wait_condition.py
+++ b/heat/engine/resources/wait_condition.py
@@ -70,7 +70,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
raise ValueError(_("Metadata format invalid"))
new_entry = signal_data.copy()
- unique_id = new_entry.pop(self.UNIQUE_ID)
+ unique_id = six.text_type(new_entry.pop(self.UNIQUE_ID))
new_rsrc_metadata = latest_rsrc_metadata.copy()
if unique_id in new_rsrc_metadata:
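
Editor's note: a two-line illustration of why the six.text_type() cast matters here: signal payloads arrive as JSON, where the unique ID may be numeric, but the stored metadata keys are strings, so duplicate detection only fires once both sides are text.

    import six

    latest_metadata = {'1': 'already handled'}
    signal = {'unique_id': 1}   # numeric ID straight from JSON

    unique_id = six.text_type(signal['unique_id'])
    print(unique_id in latest_metadata)            # True
    print(signal['unique_id'] in latest_metadata)  # False without the cast
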
diff --git a/heat/engine/rsrc_defn.py b/heat/engine/rsrc_defn.py
index c5537ce74..517ebb1d3 100644
--- a/heat/engine/rsrc_defn.py
+++ b/heat/engine/rsrc_defn.py
@@ -26,6 +26,17 @@ from heat.engine import properties
__all__ = ['ResourceDefinition']
+# Field names that can be passed to Template.get_section_name() in order to
+# determine the appropriate name for a particular template format.
+FIELDS = (
+ TYPE, PROPERTIES, METADATA, DELETION_POLICY, UPDATE_POLICY,
+ DEPENDS_ON, DESCRIPTION, EXTERNAL_ID,
+) = (
+ 'Type', 'Properties', 'Metadata', 'DeletionPolicy', 'UpdatePolicy',
+ 'DependsOn', 'Description', 'external_id',
+)
+
+
@repr_wrapper
class ResourceDefinition(object):
"""A definition of a resource, independent of any template format."""
@@ -193,6 +204,15 @@ class ResourceDefinition(object):
external_id=reparse_snippet(self._external_id),
condition=self._condition)
+ def validate(self):
+ """Validate intrinsic functions that appear in the definition."""
+ function.validate(self._properties, PROPERTIES)
+ function.validate(self._metadata, METADATA)
+ function.validate(self._depends, DEPENDS_ON)
+ function.validate(self._deletion_policy, DELETION_POLICY)
+ function.validate(self._update_policy, UPDATE_POLICY)
+ function.validate(self._external_id, EXTERNAL_ID)
+
def dep_attrs(self, resource_name, load_all=False):
"""Iterate over attributes of a given resource that this references.
@@ -230,9 +250,9 @@ class ResourceDefinition(object):
return '.'.join([self.name, section])
prop_deps = function.dependencies(self._properties,
- path('Properties'))
+ path(PROPERTIES))
metadata_deps = function.dependencies(self._metadata,
- path('Metadata'))
+ path(METADATA))
implicit_depends = six.moves.map(lambda rp: rp.name,
itertools.chain(prop_deps,
metadata_deps))
@@ -282,7 +302,7 @@ class ResourceDefinition(object):
"""
props = properties.Properties(schema, self._properties or {},
function.resolve, context=context,
- section='Properties')
+ section=PROPERTIES)
props.update_translation(self._rules, self._client_resolve)
return props
@@ -301,7 +321,7 @@ class ResourceDefinition(object):
"""
props = properties.Properties(schema, self._update_policy or {},
function.resolve, context=context,
- section='UpdatePolicy')
+ section=UPDATE_POLICY)
props.update_translation(self._rules, self._client_resolve)
return props
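
Editor's note: roughly, get_section_name() (see the template.py hunk later in this diff) maps the canonical field constants above to the casing of the template format in use. A hand-rolled sketch of that contract, not the real Template classes:

    PROPERTIES, METADATA = 'Properties', 'Metadata'

    class HOTLikeTemplate(object):
        _SECTION_MAP = {PROPERTIES: 'properties', METADATA: 'metadata'}

        def get_section_name(self, section):
            # HOT lower-cases field names; cfn would return them as-is.
            return self._SECTION_MAP.get(section, section)

    tmpl = HOTLikeTemplate()
    print('.'.join(['resources', 'my_server',
                    tmpl.get_section_name(PROPERTIES)]))
    # resources.my_server.properties
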
diff --git a/heat/engine/service.py b/heat/engine/service.py
index fb94cb6b6..be3996aeb 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -15,7 +15,6 @@ import collections
import datetime
import functools
import itertools
-import os
import pydoc
import socket
@@ -52,22 +51,18 @@ from heat.engine import parameter_groups
from heat.engine import properties
from heat.engine import resources
from heat.engine import service_software_config
-from heat.engine import service_stack_watch
from heat.engine import stack as parser
from heat.engine import stack_lock
from heat.engine import stk_defn
from heat.engine import support
from heat.engine import template as templatem
from heat.engine import update
-from heat.engine import watchrule
from heat.engine import worker
from heat.objects import event as event_object
from heat.objects import resource as resource_objects
from heat.objects import service as service_objects
from heat.objects import snapshot as snapshot_object
from heat.objects import stack as stack_object
-from heat.objects import watch_data
-from heat.objects import watch_rule
from heat.rpc import api as rpc_api
from heat.rpc import worker_api as rpc_worker_api
@@ -322,7 +317,6 @@ class EngineService(service.ServiceBase):
# The following are initialized here, but assigned in start() which
# happens after the fork when spawning multiple worker processes
- self.stack_watch = None
self.listener = None
self.worker_service = None
self.engine_id = None
@@ -341,35 +335,6 @@ class EngineService(service.ServiceBase):
'Please keep the same if you do not want to '
'delegate subset roles when upgrading.')
- def create_periodic_tasks(self):
- LOG.debug("Starting periodic watch tasks pid=%s", os.getpid())
- # Note with multiple workers, the parent process hasn't called start()
- # so we need to create a ThreadGroupManager here for the periodic tasks
- if self.thread_group_mgr is None:
- self.thread_group_mgr = ThreadGroupManager()
- self.stack_watch = service_stack_watch.StackWatch(
- self.thread_group_mgr)
-
- def create_watch_tasks():
- while True:
- try:
- # Create a periodic_watcher_task per-stack
- admin_context = context.get_admin_context()
- stacks = stack_object.Stack.get_all(
- admin_context,
- show_hidden=True)
- for s in stacks:
- self.stack_watch.start_watch_task(s.id, admin_context)
- LOG.info("Watch tasks created")
- return
- except Exception as e:
- LOG.error("Watch task creation attempt failed, %s", e)
- eventlet.sleep(5)
-
- if self.manage_thread_grp is None:
- self.manage_thread_grp = threadgroup.ThreadGroup()
- self.manage_thread_grp.add_thread(create_watch_tasks)
-
def start(self):
self.engine_id = service_utils.generate_engine_id()
if self.thread_group_mgr is None:
@@ -730,7 +695,7 @@ class EngineService(service.ServiceBase):
parent_resource=parent_resource_name,
**common_params)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
self._validate_deferred_auth_context(cnxt, stack)
is_root = stack.nested_depth == 0
stack.validate()
@@ -819,14 +784,6 @@ class EngineService(service.ServiceBase):
elif stack.status != stack.FAILED:
stack.create(msg_queue=msg_queue)
- if (stack.action in (stack.CREATE, stack.ADOPT)
- and stack.status == stack.COMPLETE):
- if self.stack_watch:
- # Schedule a periodic watcher task for this stack
- self.stack_watch.start_watch_task(stack.id, cnxt)
- else:
- LOG.info("Stack create failed, status %s", stack.status)
-
convergence = cfg.CONF.convergence_engine
stack = self._parse_template_and_validate_stack(
@@ -964,7 +921,8 @@ class EngineService(service.ServiceBase):
if invalid_params:
raise exception.ImmutableParameterModified(*invalid_params)
- self.resource_enforcer.enforce_stack(updated_stack)
+ self.resource_enforcer.enforce_stack(updated_stack,
+ is_registered_policy=True)
updated_stack.parameters.set_stack_id(current_stack.identifier())
self._validate_deferred_auth_context(cnxt, updated_stack)
@@ -999,7 +957,8 @@ class EngineService(service.ServiceBase):
cnxt, stack=db_stack, use_stored_context=True)
else:
current_stack = parser.Stack.load(cnxt, stack=db_stack)
- self.resource_enforcer.enforce_stack(current_stack)
+ self.resource_enforcer.enforce_stack(current_stack,
+ is_registered_policy=True)
if current_stack.action == current_stack.SUSPEND:
msg = _('Updating a stack when it is suspended')
@@ -1279,36 +1238,7 @@ class EngineService(service.ServiceBase):
result['ParameterGroups'] = param_groups.parameter_groups
if show_nested:
- # Note preview_resources is needed here to build the tree
- # of nested resources/stacks in memory, otherwise the
- # nested/has_nested() tests below won't work
- stack.preview_resources()
-
- def nested_params(stk):
- n_result = {}
- for r in stk:
- if stk[r].has_nested():
- n_params = stk[r].nested().parameters.map(
- api.format_validate_parameter,
- filter_func=filter_parameter)
- n_result[r] = {
- 'Type': stk[r].type(),
- 'Description': stk[r].nested().t.get(
- 'Description', ''),
- 'Parameters': n_params
- }
-
- # Add parameter_groups if it is present in nested stack
- nested_pg = parameter_groups.ParameterGroups(
- stk[r].nested().t)
- if nested_pg.parameter_groups:
- n_result[r].update({'ParameterGroups':
- nested_pg.parameter_groups})
-
- n_result[r].update(nested_params(stk[r].nested()))
- return {'NestedParameters': n_result} if n_result else {}
-
- result.update(nested_params(stack))
+ result.update(stack.get_nested_parameters(filter_parameter))
result['Environment'] = tmpl.env.user_env_as_dict()
return result
@@ -1417,7 +1347,7 @@ class EngineService(service.ServiceBase):
LOG.info('Deleting stack %s', st.name)
stack = parser.Stack.load(cnxt, stack=st)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
if stack.convergence and cfg.CONF.convergence_engine:
def convergence_delete():
@@ -1465,7 +1395,8 @@ class EngineService(service.ServiceBase):
def reload():
st = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=st)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack,
+ is_registered_policy=True)
return stack
def wait_then_delete(stack):
@@ -1642,7 +1573,8 @@ class EngineService(service.ServiceBase):
:param type_name: Name of the resource type to obtain the schema of.
:param with_description: Return result with description or not.
"""
- self.resource_enforcer.enforce(cnxt, type_name)
+ self.resource_enforcer.enforce(cnxt, type_name,
+ is_registered_policy=True)
try:
resource_class = resources.global_env().get_class(type_name)
except exception.NotFound:
@@ -1703,7 +1635,8 @@ class EngineService(service.ServiceBase):
:param type_name: Name of the resource type to generate a template for.
:param template_type: the template type to generate, cfn or hot.
"""
- self.resource_enforcer.enforce(cnxt, type_name)
+ self.resource_enforcer.enforce(cnxt, type_name,
+ is_registered_policy=True)
try:
resource_class = resources.global_env().get_class(type_name)
except exception.NotFound:
@@ -1744,12 +1677,12 @@ class EngineService(service.ServiceBase):
if nested_depth:
root_stack_identifier = st.identifier()
- # find all resources associated with a root stack
- all_r = resource_objects.Resource.get_all_by_root_stack(
- cnxt, st.id, None)
+ # find all stacks with resources associated with a root stack
+ ResObj = resource_objects.Resource
+ stack_ids = ResObj.get_all_stack_ids_by_root_stack(cnxt,
+ st.id)
# find stacks to the requested nested_depth
- stack_ids = {r.stack_id for r in six.itervalues(all_r)}
stack_filters = {
'id': stack_ids,
'nested_depth': list(range(nested_depth + 1))
@@ -2047,7 +1980,7 @@ class EngineService(service.ServiceBase):
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
_stack_suspend, stack)
@@ -2061,7 +1994,7 @@ class EngineService(service.ServiceBase):
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
_stack_resume, stack)
@@ -2146,7 +2079,7 @@ class EngineService(service.ServiceBase):
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
- self.resource_enforcer.enforce_stack(stack)
+ self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
snapshot = snapshot_object.Snapshot.get_snapshot_by_stack(
cnxt, snapshot_id, s)
# FIXME(pas-ha) has to be amended to deny restoring stacks
@@ -2169,106 +2102,6 @@ class EngineService(service.ServiceBase):
return [api.format_snapshot(snapshot) for snapshot in data]
@context.request_context
- def create_watch_data(self, cnxt, watch_name, stats_data):
- """Creates data for CloudWatch and WaitConditions.
-
- This could be used by CloudWatch and WaitConditions
- and treat HA service events like any other CloudWatch.
- """
- def get_matching_watches():
- if watch_name:
- yield watchrule.WatchRule.load(cnxt, watch_name)
- else:
- for wr in watch_rule.WatchRule.get_all(cnxt):
- if watchrule.rule_can_use_sample(wr, stats_data):
- yield watchrule.WatchRule.load(cnxt, watch=wr)
-
- rule_run = False
- for rule in get_matching_watches():
- rule.create_watch_data(stats_data)
- rule_run = True
-
- if not rule_run:
- if watch_name is None:
- watch_name = 'Unknown'
- raise exception.EntityNotFound(entity='Watch Rule',
- name=watch_name)
-
- return stats_data
-
- @context.request_context
- def show_watch(self, cnxt, watch_name):
- """Return the attributes of one watch/alarm.
-
- :param cnxt: RPC context.
- :param watch_name: Name of the watch you want to see, or None to see
- all.
- """
- if watch_name:
- wrn = [watch_name]
- else:
- try:
- wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)]
- except Exception as ex:
- LOG.warning('show_watch (all) db error %s', ex)
- return
-
- wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
- result = [api.format_watch(w) for w in wrs]
- return result
-
- @context.request_context
- def show_watch_metric(self, cnxt, metric_namespace=None, metric_name=None):
- """Return the datapoints for a metric.
-
- :param cnxt: RPC context.
- :param metric_namespace: Name of the namespace you want to see, or None
- to see all.
- :param metric_name: Name of the metric you want to see, or None to see
- all.
- """
-
- # DB API and schema does not yet allow us to easily query by
- # namespace/metric, but we will want this at some point
- # for now, the API can query all metric data and filter locally
- if metric_namespace is not None or metric_name is not None:
- LOG.error("Filtering by namespace/metric not yet supported")
- return
-
- try:
- wds = watch_data.WatchData.get_all(cnxt)
- rule_names = {
- r.id: r.name for r in watch_rule.WatchRule.get_all(cnxt)
- }
- except Exception as ex:
- LOG.warning('show_metric (all) db error %s', ex)
- return
-
- result = [api.format_watch_data(w, rule_names) for w in wds]
- return result
-
- @context.request_context
- def set_watch_state(self, cnxt, watch_name, state):
- """Temporarily set the state of a given watch.
-
- :param cnxt: RPC context.
- :param watch_name: Name of the watch.
- :param state: State (must be one defined in WatchRule class.
- """
- wr = watchrule.WatchRule.load(cnxt, watch_name)
- if wr.state == rpc_api.WATCH_STATE_CEILOMETER_CONTROLLED:
- return
- actions = wr.set_watch_state(state)
- for action in actions:
- self.thread_group_mgr.start(wr.stack_id, action)
-
- # Return the watch with the state overridden to indicate success
- # We do not update the timestamps as we are not modifying the DB
- result = api.format_watch(wr)
- result[rpc_api.WATCH_STATE_VALUE] = state
- return result
-
- @context.request_context
def show_software_config(self, cnxt, config_id):
return self.software_config.show_software_config(cnxt, config_id)
@@ -2449,15 +2282,17 @@ class EngineService(service.ServiceBase):
service_objects.Service.delete(cnxt, service_ref['id'])
def reset_stack_status(self):
- cnxt = context.get_admin_context()
filters = {
'status': parser.Stack.IN_PROGRESS,
'convergence': False
}
- stacks = stack_object.Stack.get_all(cnxt,
+ stacks = stack_object.Stack.get_all(context.get_admin_context(),
filters=filters,
show_nested=True)
for s in stacks:
+ # Build one context per stack, so that it can safely be passed
+ # to the thread that handles it.
+ cnxt = context.get_admin_context()
stack_id = s.id
lock = stack_lock.StackLock(cnxt, stack_id, self.engine_id)
engine_id = lock.get_engine_id()
diff --git a/heat/engine/service_software_config.py b/heat/engine/service_software_config.py
index 6d036d1b4..aa203ae12 100644
--- a/heat/engine/service_software_config.py
+++ b/heat/engine/service_software_config.py
@@ -123,22 +123,33 @@ class SoftwareConfigService(object):
if etag:
metadata_headers = {'if-match': etag}
else:
- LOG.warning('Couldn\'t find existing Swift metadata')
+ LOG.warning("Couldn't find existing Swift metadata "
+ "for server %s", server_id)
rows_updated = db_api.resource_update(
cnxt, rs.id, {'rsrc_metadata': md}, rs.atomic_key)
if not rows_updated:
- LOG.debug('Conflict on database deployment update, retrying')
+ LOG.debug('Conflict on deployment metadata update for '
+ 'server %s; retrying', server_id)
action = _('deployments of server %s') % server_id
raise exception.ConcurrentTransaction(action=action)
+ LOG.debug('Updated deployment metadata for server %s', server_id)
+
if metadata_put_url:
json_md = jsonutils.dumps(md)
resp = requests.put(metadata_put_url, json_md,
headers=metadata_headers)
- if resp.status_code == 412:
- LOG.debug('Conflict on Swift deployment update, retrying')
+ if resp.status_code == requests.codes.precondition_failed:
+ LOG.debug('Conflict on Swift deployment update for '
+ 'server %s; retrying', server_id)
action = _('deployments of server %s') % server_id
raise exception.ConcurrentTransaction(action=action)
+ else:
+ try:
+ resp.raise_for_status()
+ except requests.HTTPError as exc:
+ LOG.error('Failed to deliver deployment data to '
+ 'server %s: %s', server_id, exc)
if metadata_queue_id:
project = stack_user_project_id
queue = self._get_zaqar_queue(cnxt, rs, project, metadata_queue_id)
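
Editor's note: the Swift update above is a conditional PUT: the If-Match header carries the ETag read earlier, a 412 (requests.codes.precondition_failed) means another writer won the race and the operation is retried, and any other failure is now surfaced instead of silently swallowed. A minimal sketch of that pattern with a hypothetical URL argument:

    import json
    import requests

    def put_metadata(url, metadata, etag=None):
        headers = {'if-match': etag} if etag else {}
        resp = requests.put(url, json.dumps(metadata), headers=headers)
        if resp.status_code == requests.codes.precondition_failed:  # 412
            raise RuntimeError('concurrent update; re-read and retry')
        resp.raise_for_status()   # surface any other delivery failure
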
diff --git a/heat/engine/service_stack_watch.py b/heat/engine/service_stack_watch.py
deleted file mode 100644
index 9e60f2e69..000000000
--- a/heat/engine/service_stack_watch.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from heat.common import context
-from heat.engine import stack
-from heat.engine import stk_defn
-from heat.engine import watchrule
-from heat.objects import stack as stack_object
-from heat.objects import watch_rule as watch_rule_object
-from heat.rpc import api as rpc_api
-
-LOG = logging.getLogger(__name__)
-
-
-class StackWatch(object):
- def __init__(self, thread_group_mgr):
- self.thread_group_mgr = thread_group_mgr
-
- def start_watch_task(self, stack_id, cnxt):
-
- def stack_has_a_watchrule(sid):
- wrs = watch_rule_object.WatchRule.get_all_by_stack(cnxt, sid)
-
- now = timeutils.utcnow()
- start_watch_thread = False
- for wr in wrs:
- # reset the last_evaluated so we don't fire off alarms when
- # the engine has not been running.
- watch_rule_object.WatchRule.update_by_id(
- cnxt, wr.id,
- {'last_evaluated': now})
-
- if wr.state != rpc_api.WATCH_STATE_CEILOMETER_CONTROLLED:
- start_watch_thread = True
-
- children = stack_object.Stack.get_all_by_owner_id(cnxt, sid)
- for child in children:
- if stack_has_a_watchrule(child.id):
- start_watch_thread = True
-
- return start_watch_thread
-
- if stack_has_a_watchrule(stack_id):
- self.thread_group_mgr.add_timer(
- stack_id,
- self.periodic_watcher_task,
- sid=stack_id)
-
- def check_stack_watches(self, sid):
- # Use admin_context for stack_get to defeat tenant
- # scoping otherwise we fail to retrieve the stack
- LOG.debug("Periodic watcher task for stack %s", sid)
- admin_context = context.get_admin_context()
- db_stack = stack_object.Stack.get_by_id(admin_context,
- sid)
- if not db_stack:
- LOG.error("Unable to retrieve stack %s for periodic task", sid)
- return
- stk = stack.Stack.load(admin_context, stack=db_stack,
- use_stored_context=True)
-
- # recurse into any nested stacks.
- children = stack_object.Stack.get_all_by_owner_id(admin_context, sid)
- for child in children:
- self.check_stack_watches(child.id)
-
- # Get all watchrules for this stack and evaluate them
- try:
- wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
- sid)
- except Exception as ex:
- LOG.warning('periodic_task db error watch rule removed? %s', ex)
- return
-
- def run_alarm_action(stk, actions, details):
- for action in actions:
- action(details=details)
- for res in stk._explicit_dependencies():
- res.metadata_update()
- stk_defn.update_resource_data(stk.defn, res.name,
- res.node_data())
-
- for wr in wrs:
- rule = watchrule.WatchRule.load(stk.context, watch=wr)
- actions = rule.evaluate()
- if actions:
- self.thread_group_mgr.start(sid, run_alarm_action, stk,
- actions, rule.get_details())
-
- def periodic_watcher_task(self, sid):
- """Evaluate all watch-rules defined for stack ID.
-
- Periodic task, created for each stack, triggers watch-rule evaluation
- for all rules defined for the stack sid = stack ID.
- """
- self.check_stack_watches(sid)
diff --git a/heat/engine/stack.py b/heat/engine/stack.py
index 16e5d2e55..6252fd273 100644
--- a/heat/engine/stack.py
+++ b/heat/engine/stack.py
@@ -32,6 +32,7 @@ from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
from heat.common import lifecycle_plugin_utils
+from heat.engine import api
from heat.engine import dependencies
from heat.engine import environment
from heat.engine import event
@@ -389,7 +390,10 @@ class Stack(collections.Mapping):
elif stk_def_cache and tid in stk_def_cache:
stk_def = stk_def_cache[tid]
else:
- t = tmpl.Template.load(self.context, tid)
+ try:
+ t = tmpl.Template.load(self.context, tid)
+ except exception.NotFound:
+ return None
stk_def = self.defn.clone_with_new_template(t,
self.identifier())
if stk_def_cache is not None:
@@ -904,13 +908,15 @@ class Stack(collections.Mapping):
for op_name, output in six.iteritems(self.outputs):
try:
- path = '.'.join([self.t.OUTPUTS, op_name,
- self.t.OUTPUT_VALUE])
- output.validate(path)
- except exception.StackValidationFailed:
- raise
- except AssertionError:
- raise
+ output.validate()
+ except exception.StackValidationFailed as ex:
+ path = [self.t.OUTPUTS, op_name,
+ self.t.get_section_name(ex.path[0])]
+ path.extend(ex.path[1:])
+ raise exception.StackValidationFailed(
+ error=ex.error,
+ path=path,
+ message=ex.error_message)
def requires_deferred_auth(self):
"""Determine whether to perform API requests with deferred auth.
@@ -964,13 +970,11 @@ class Stack(collections.Mapping):
# delete, stack lock is not used, hence persist state
updated = self._persist_state()
if not updated:
- # Possibly failed concurrent update
- LOG.warning("Failed to set state of stack %(name)s with"
- " traversal ID %(trvsl_id)s, to"
- " %(action)s_%(status)s",
- {'name': self.name,
- 'trvsl_id': self.current_traversal,
- 'action': action, 'status': status})
+ LOG.info("Stack %(name)s traversal %(trvsl_id)s no longer "
+ "active; not setting state to %(action)s_%(status)s",
+ {'name': self.name,
+ 'trvsl_id': self.current_traversal,
+ 'action': action, 'status': status})
return updated
# Persist state to db only if status == IN_PROGRESS
@@ -1048,6 +1052,36 @@ class Stack(collections.Mapping):
return [resource.preview()
for resource in six.itervalues(self.resources)]
+ def get_nested_parameters(self, filter_func):
+ """Return nested parameters schema, if any.
+
+ This introspects the resources to return the parameters of the nested
+ stacks. It uses the `get_nested_parameters_stack` API to build the
+ stack.
+ """
+ result = {}
+ for name, rsrc in six.iteritems(self.resources):
+ nested = rsrc.get_nested_parameters_stack()
+ if nested is None:
+ continue
+ nested_params = nested.parameters.map(
+ api.format_validate_parameter,
+ filter_func=filter_func)
+ params = {
+ 'Type': rsrc.type(),
+ 'Description': nested.t.get('Description', ''),
+ 'Parameters': nested_params
+ }
+
+ # Add parameter_groups if it is present in nested stack
+ nested_pg = param_groups.ParameterGroups(nested.t)
+ if nested_pg.parameter_groups:
+ params.update({'ParameterGroups': nested_pg.parameter_groups})
+
+ params.update(nested.get_nested_parameters(filter_func))
+ result[name] = params
+ return {'NestedParameters': result} if result else {}
+
def _store_resources(self):
for r in reversed(self.dependencies):
if r.action == r.INIT:
@@ -1343,10 +1377,11 @@ class Stack(collections.Mapping):
self.context, self.id, self.current_traversal, True, self.id)
leaves = set(self.convergence_dependencies.leaves())
- if not any(leaves):
+ if not leaves:
self.mark_complete()
else:
- for rsrc_id, is_update in self.convergence_dependencies.leaves():
+ for rsrc_id, is_update in sorted(leaves,
+ key=lambda n: n.is_update):
if is_update:
LOG.info("Triggering resource %s for update", rsrc_id)
else:
@@ -1383,7 +1418,7 @@ class Stack(collections.Mapping):
def _get_best_existing_rsrc_db(self, rsrc_name):
candidate = None
if self.ext_rsrcs_db:
- for id, ext_rsrc in self.ext_rsrcs_db.items():
+ for ext_rsrc in self.ext_rsrcs_db.values():
if ext_rsrc.name != rsrc_name:
continue
if ext_rsrc.current_template_id == self.t.id:
@@ -1564,8 +1599,7 @@ class Stack(collections.Mapping):
finally:
self.reset_dependencies()
- if action in (self.UPDATE, self.RESTORE, self.ROLLBACK):
- self.status_reason = 'Stack %s completed successfully' % action
+ self.status_reason = 'Stack %s completed successfully' % action
self.status = self.COMPLETE
except scheduler.Timeout:
@@ -1984,44 +2018,6 @@ class Stack(collections.Mapping):
action=self.RESTORE)
updater()
- def restart_resource(self, resource_name):
- """Restart the resource specified by resource_name.
-
- stop resource_name and all that depend on it
- start resource_name and all that depend on it
- """
- warnings.warn("Stack.restart_resource() is horribly broken and will "
- "never be fixed. If you're using it in a resource type "
- "other than HARestarter, don't. And don't use "
- "HARestarter either.",
- DeprecationWarning)
-
- deps = self.dependencies[self[resource_name]]
- failed = False
-
- for res in reversed(deps):
- try:
- scheduler.TaskRunner(res.destroy)()
- except exception.ResourceFailure as ex:
- failed = True
- LOG.info('Resource %(name)s delete failed: %(ex)s',
- {'name': res.name, 'ex': ex})
-
- for res in deps:
- if not failed:
- try:
- res.state_reset()
- scheduler.TaskRunner(res.create)()
- except exception.ResourceFailure as ex:
- failed = True
- LOG.info('Resource %(name)s create failed: '
- '%(ex)s', {'name': res.name, 'ex': ex})
- else:
- res.state_set(res.CREATE, res.FAILED,
- 'Resource restart aborted')
- # TODO(asalkeld) if any of this fails we Should
- # restart the whole stack
-
def get_availability_zones(self):
nova = self.clients.client('nova')
if self._zones is None:
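
Editor's note: a toy version of the recursion get_nested_parameters() performs, with plain dicts standing in for Stack and Resource objects: each level contributes its own parameter schema and splices in whatever its children report.

    def nested_params(stack):
        result = {}
        for name, rsrc in stack['resources'].items():
            child = rsrc.get('nested')
            if child is None:     # mirrors get_nested_parameters_stack()
                continue
            entry = {'Type': rsrc['type'], 'Parameters': child['params']}
            entry.update(nested_params(child))   # recurse into grandchildren
            result[name] = entry
        return {'NestedParameters': result} if result else {}

    leaf = {'params': {'flavor': {'Type': 'String'}}, 'resources': {}}
    root = {'resources': {'group': {'type': 'OS::Heat::Stack',
                                    'nested': leaf}}}
    print(nested_params(root))
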
diff --git a/heat/engine/sync_point.py b/heat/engine/sync_point.py
index 0ea8dd204..09f92365d 100644
--- a/heat/engine/sync_point.py
+++ b/heat/engine/sync_point.py
@@ -13,9 +13,8 @@
# limitations under the License.
import ast
-import eventlet
-import random
import six
+import tenacity
from oslo_log import log as logging
@@ -116,25 +115,55 @@ def serialize_input_data(input_data):
return {'input_data': _serialize(input_data)}
+class wait_random_exponential(tenacity.wait_exponential):
+ """Random wait strategy with a geometrically increasing amount of jitter.
+
+ Implements the truncated binary exponential backoff algorithm as used in
+ e.g. CSMA media access control. The retry occurs at a random time in a
+ (geometrically) expanding interval constrained by minimum and maximum
+ limits.
+ """
+ def __init__(self, min=0, multiplier=1, max=tenacity._utils.MAX_WAIT,
+ exp_base=2):
+ super(wait_random_exponential, self).__init__(multiplier=multiplier,
+ max=(max-min),
+ exp_base=exp_base)
+ self._random = tenacity.wait_random(min=min, max=(min + multiplier))
+
+ def __call__(self, previous_attempt_number, delay_since_first_attempt):
+ jitter = super(wait_random_exponential,
+ self).__call__(previous_attempt_number,
+ delay_since_first_attempt)
+ self._random.wait_random_max = self._random.wait_random_min + jitter
+ return self._random(previous_attempt_number, delay_since_first_attempt)
+
+
def sync(cnxt, entity_id, current_traversal, is_update, propagate,
predecessors, new_data):
- rows_updated = None
- sync_point = None
- input_data = None
- nconflicts = max(0, len(predecessors) - 2)
- # limit to 10 seconds
- max_wt = min(nconflicts * 0.01, 10)
- while not rows_updated:
+ # Retry waits up to 60 seconds at most, with exponentially increasing
+ # amounts of jitter per resource still outstanding
+ wait_strategy = wait_random_exponential(max=60)
+
+ def init_jitter(existing_input_data):
+ nconflicts = max(0, len(predecessors) - len(existing_input_data) - 1)
+ # 10ms per potential conflict, up to a max of 10s in total
+ return min(nconflicts, 1000) * 0.01
+
+ @tenacity.retry(
+ retry=tenacity.retry_if_result(lambda r: r is None),
+ wait=wait_strategy
+ )
+ def _sync():
sync_point = get(cnxt, entity_id, current_traversal, is_update)
input_data = deserialize_input_data(sync_point.input_data)
+ wait_strategy.multiplier = init_jitter(input_data)
input_data.update(new_data)
rows_updated = update_input_data(
cnxt, entity_id, current_traversal, is_update,
sync_point.atomic_key, serialize_input_data(input_data))
- # don't aggressively spin; induce some sleep
- if not rows_updated:
- eventlet.sleep(random.uniform(0, max_wt))
+ return input_data if rows_updated else None
+ input_data = _sync()
waiting = predecessors - set(input_data)
key = make_key(entity_id, current_traversal, is_update)
if waiting:
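
Editor's note: the sync() rewrite swaps the hand-rolled sleep loop for tenacity: retry while the update returns None, waiting a random, exponentially growing interval each time. Recent tenacity releases ship their own wait_random_exponential (without the minimum bound the subclass above adds); a toy sketch against that built-in, with a fake race that resolves after a few attempts:

    import tenacity

    attempts = {'n': 0}

    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda r: r is None),
        wait=tenacity.wait_random_exponential(multiplier=0.01, max=2))
    def write_input_data():
        # Stand-in for update_input_data(): lose the atomic-key race
        # twice, then succeed.
        attempts['n'] += 1
        return None if attempts['n'] < 3 else {'merged': True}

    print(write_input_data(), 'after', attempts['n'], 'attempts')
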
diff --git a/heat/engine/template.py b/heat/engine/template.py
index cb5551c4e..8493e4069 100644
--- a/heat/engine/template.py
+++ b/heat/engine/template.py
@@ -213,7 +213,16 @@ class Template(collections.Mapping):
@abc.abstractmethod
def get_section_name(self, section):
- """Return a correct section name."""
+ """Get the name of a field within a resource or output definition.
+
+ Return the name of the given field (specified by the constants given
+ in heat.engine.rsrc_defn and heat.engine.output) in the template
+ format. This is used in error reporting to help users find the
+ location of errors in the template.
+
+ Note that 'section' here does not refer to a top-level section of the
+ template (like parameters, resources, &c.) as it does everywhere else.
+ """
pass
@abc.abstractmethod
diff --git a/heat/engine/template_files.py b/heat/engine/template_files.py
index 184c9d392..844a5fbe4 100644
--- a/heat/engine/template_files.py
+++ b/heat/engine/template_files.py
@@ -76,7 +76,7 @@ class TemplateFiles(collections.Mapping):
def __iter__(self):
self._refresh_if_needed()
- if self.files_id is None:
+ if self.files is None:
return iter(ReadOnlyDict({}))
return iter(self.files)
diff --git a/heat/engine/translation.py b/heat/engine/translation.py
index 90d8083ea..d6f1fe60c 100644
--- a/heat/engine/translation.py
+++ b/heat/engine/translation.py
@@ -197,8 +197,7 @@ class Translation(object):
return (self.is_active and
(key in self._rules or key in self.resolved_translations))
- def translate(self, key, prop_value=None, prop_data=None, validate=False,
- template=None):
+ def translate(self, key, prop_value=None, prop_data=None, validate=False):
if key in self.resolved_translations:
return self.resolved_translations[key]
@@ -212,12 +211,10 @@ class Translation(object):
result = None
if rule.rule == TranslationRule.REPLACE:
- result = self.replace(key, rule, result, prop_data, validate,
- template)
+ result = self.replace(key, rule, result, prop_data, validate)
if rule.rule == TranslationRule.ADD:
- result = self.add(key, rule, result, prop_data, validate,
- template)
+ result = self.add(key, rule, result, prop_data, validate)
if rule.rule == TranslationRule.RESOLVE:
resolved_value = resolve_and_find(result,
@@ -231,7 +228,7 @@ class Translation(object):
return result
def add(self, key, add_rule, prop_value=None, prop_data=None,
- validate=False, template=None):
+ validate=False):
value_path = add_rule.get_value_absolute_path()
if prop_value is None:
prop_value = []
@@ -252,8 +249,7 @@ class Translation(object):
value = get_value(value_path,
prop_data if add_rule.value_name else
self.properties,
- validate,
- template)
+ validate)
self.is_active = True
if value is not None:
translation_value.extend(value if isinstance(value, list)
@@ -264,7 +260,7 @@ class Translation(object):
return translation_value
def replace(self, key, replace_rule, prop_value=None, prop_data=None,
- validate=False, template=None):
+ validate=False):
value = None
value_path = replace_rule.get_value_absolute_path(full_value_name=True)
short_path = replace_rule.get_value_absolute_path()
@@ -280,7 +276,7 @@ class Translation(object):
subpath = value_path
props = prop_data if replace_rule.value_name else self.properties
self.is_active = False
- value = get_value(subpath, props, validate, template)
+ value = get_value(subpath, props, validate)
self.is_active = True
if self.has_translation(prop_path):
@@ -304,7 +300,7 @@ class Translation(object):
return result
-def get_value(path, props, validate=False, template=None):
+def get_value(path, props, validate=False):
if not props:
return None
@@ -312,7 +308,7 @@ def get_value(path, props, validate=False, template=None):
if isinstance(props, dict):
prop = props.get(key)
else:
- prop = props._get_property_value(key, validate, template)
+ prop = props._get_property_value(key, validate)
if len(path[1:]) == 0:
return prop
elif prop is None:
diff --git a/heat/engine/watchrule.py b/heat/engine/watchrule.py
deleted file mode 100644
index 6f72b8676..000000000
--- a/heat/engine/watchrule.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import datetime
-
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from heat.common import exception
-from heat.common.i18n import _
-from heat.engine import stack
-from heat.engine import timestamp
-from heat.objects import stack as stack_object
-from heat.objects import watch_data as watch_data_objects
-from heat.objects import watch_rule as watch_rule_objects
-from heat.rpc import api as rpc_api
-
-LOG = logging.getLogger(__name__)
-
-
-class WatchRule(object):
- WATCH_STATES = (
- ALARM,
- NORMAL,
- NODATA,
- SUSPENDED,
- CEILOMETER_CONTROLLED,
- ) = (
- rpc_api.WATCH_STATE_ALARM,
- rpc_api.WATCH_STATE_OK,
- rpc_api.WATCH_STATE_NODATA,
- rpc_api.WATCH_STATE_SUSPENDED,
- rpc_api.WATCH_STATE_CEILOMETER_CONTROLLED,
- )
- ACTION_MAP = {ALARM: 'AlarmActions',
- NORMAL: 'OKActions',
- NODATA: 'InsufficientDataActions'}
-
- created_at = timestamp.Timestamp(watch_rule_objects.WatchRule.get_by_id,
- 'created_at')
- updated_at = timestamp.Timestamp(watch_rule_objects.WatchRule.get_by_id,
- 'updated_at')
-
- def __init__(self, context, watch_name, rule, stack_id=None,
- state=NODATA, wid=None, watch_data=None,
- last_evaluated=None):
-
- self.context = context
- self.now = timeutils.utcnow()
- self.name = watch_name
- self.state = state
- self.rule = rule
- self.stack_id = stack_id
- period = 0
- if 'Period' in rule:
- period = int(rule['Period'])
- elif 'period' in rule:
- period = int(rule['period'])
- self.timeperiod = datetime.timedelta(seconds=period)
- self.id = wid
- self.watch_data = watch_data or []
- self.last_evaluated = last_evaluated or timeutils.utcnow()
-
- @classmethod
- def load(cls, context, watch_name=None, watch=None):
- """Load the watchrule object.
-
- The object can be loaded either from the DB by name or from an existing
- DB object.
- """
- if watch is None:
- try:
- watch = watch_rule_objects.WatchRule.get_by_name(context,
- watch_name)
- except Exception as ex:
- LOG.warning('WatchRule.load (%(watch_name)s) db error %(ex)s',
- {'watch_name': watch_name, 'ex': ex})
- if watch is None:
- raise exception.EntityNotFound(entity='Watch Rule',
- name=watch_name)
- else:
- return cls(context=context,
- watch_name=watch.name,
- rule=watch.rule,
- stack_id=watch.stack_id,
- state=watch.state,
- wid=watch.id,
- watch_data=watch.watch_data,
- last_evaluated=watch.last_evaluated)
-
- def store(self):
- """Store the watchrule in the database and return its ID.
-
- If self.id is set, we update the existing rule.
- """
-
- wr_values = {
- 'name': self.name,
- 'rule': self.rule,
- 'state': self.state,
- 'stack_id': self.stack_id
- }
-
- if self.id is None:
- wr = watch_rule_objects.WatchRule.create(self.context, wr_values)
- self.id = wr.id
- else:
- watch_rule_objects.WatchRule.update_by_id(self.context, self.id,
- wr_values)
-
- def destroy(self):
- """Delete the watchrule from the database."""
- if self.id is not None:
- watch_rule_objects.WatchRule.delete(self.context, self.id)
-
- def do_data_cmp(self, data, threshold):
- op = self.rule['ComparisonOperator']
- if op == 'GreaterThanThreshold':
- return data > threshold
- elif op == 'GreaterThanOrEqualToThreshold':
- return data >= threshold
- elif op == 'LessThanThreshold':
- return data < threshold
- elif op == 'LessThanOrEqualToThreshold':
- return data <= threshold
- else:
- return False
-
- def do_Maximum(self):
- data = 0
- have_data = False
- for d in self.watch_data:
- if d.created_at < self.now - self.timeperiod:
- continue
- if not have_data:
- data = float(d.data[self.rule['MetricName']]['Value'])
- have_data = True
- if float(d.data[self.rule['MetricName']]['Value']) > data:
- data = float(d.data[self.rule['MetricName']]['Value'])
-
- if not have_data:
- return self.NODATA
-
- if self.do_data_cmp(data,
- float(self.rule['Threshold'])):
- return self.ALARM
- else:
- return self.NORMAL
-
- def do_Minimum(self):
- data = 0
- have_data = False
- for d in self.watch_data:
- if d.created_at < self.now - self.timeperiod:
- continue
- if not have_data:
- data = float(d.data[self.rule['MetricName']]['Value'])
- have_data = True
- elif float(d.data[self.rule['MetricName']]['Value']) < data:
- data = float(d.data[self.rule['MetricName']]['Value'])
-
- if not have_data:
- return self.NODATA
-
- if self.do_data_cmp(data,
- float(self.rule['Threshold'])):
- return self.ALARM
- else:
- return self.NORMAL
-
- def do_SampleCount(self):
- """Count all samples within the specified period."""
- data = 0
- for d in self.watch_data:
- if d.created_at < self.now - self.timeperiod:
- continue
- data = data + 1
-
- if self.do_data_cmp(data,
- float(self.rule['Threshold'])):
- return self.ALARM
- else:
- return self.NORMAL
-
- def do_Average(self):
- data = 0
- samples = 0
- for d in self.watch_data:
- if d.created_at < self.now - self.timeperiod:
- continue
- samples = samples + 1
- data = data + float(d.data[self.rule['MetricName']]['Value'])
-
- if samples == 0:
- return self.NODATA
-
- data = data / samples
- if self.do_data_cmp(data,
- float(self.rule['Threshold'])):
- return self.ALARM
- else:
- return self.NORMAL
-
- def do_Sum(self):
- data = 0
- for d in self.watch_data:
- if d.created_at < self.now - self.timeperiod:
- LOG.debug('ignoring %s', str(d.data))
- continue
- data = data + float(d.data[self.rule['MetricName']]['Value'])
-
- if self.do_data_cmp(data,
- float(self.rule['Threshold'])):
- return self.ALARM
- else:
- return self.NORMAL
-
- def get_alarm_state(self):
- fn = getattr(self, 'do_%s' % self.rule['Statistic'])
- return fn()
-
- def evaluate(self):
- if self.state in [self.CEILOMETER_CONTROLLED, self.SUSPENDED]:
- return []
- # has enough time progressed to run the rule
- self.now = timeutils.utcnow()
- if self.now < (self.last_evaluated + self.timeperiod):
- return []
- return self.run_rule()
-
- def get_details(self):
- return {'alarm': self.name,
- 'state': self.state}
-
- def run_rule(self):
- new_state = self.get_alarm_state()
- actions = self.rule_actions(new_state)
- self.state = new_state
-
- self.last_evaluated = self.now
- self.store()
- return actions
-
- def rule_actions(self, new_state):
- LOG.info('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
- 'new_state:%(new_state)s', {'stack': self.stack_id,
- 'watch_name': self.name,
- 'new_state': new_state})
- actions = []
- if self.ACTION_MAP[new_state] not in self.rule:
- LOG.info('no action for new state %s', new_state)
- else:
- s = stack_object.Stack.get_by_id(
- self.context,
- self.stack_id)
- stk = stack.Stack.load(self.context, stack=s)
- if (stk.action != stk.DELETE
- and stk.status == stk.COMPLETE):
- for refid in self.rule[self.ACTION_MAP[new_state]]:
- actions.append(stk.resource_by_refid(refid).signal)
- else:
- LOG.warning("Could not process watch state %s for stack",
- new_state)
- return actions
-
- def _to_ceilometer(self, data):
- clients = self.context.clients
- sample = {}
- sample['counter_type'] = 'gauge'
-
- for k, d in iter(data.items()):
- if k == 'Namespace':
- continue
- sample['counter_name'] = k
- sample['counter_volume'] = d['Value']
- sample['counter_unit'] = d['Unit']
- dims = d.get('Dimensions', {})
- if isinstance(dims, list):
- dims = dims[0]
- sample['resource_metadata'] = dims
- sample['resource_id'] = dims.get('InstanceId')
- LOG.debug('new sample:%(k)s data:%(sample)s', {
- 'k': k, 'sample': sample})
- clients.client('ceilometer').samples.create(**sample)
-
- def create_watch_data(self, data):
- if self.state == self.CEILOMETER_CONTROLLED:
- # This is a short-term measure for users who still have cfn-push-stats
- # in their templates but want to use Ceilometer alarms.
-
- self._to_ceilometer(data)
- return
-
- if self.state == self.SUSPENDED:
- LOG.debug('Ignoring metric data for %s, SUSPENDED state',
- self.name)
- return []
-
- if self.rule['MetricName'] not in data:
- # Our simplified CloudWatch implementation expects only a single
- # metric associated with each alarm, but some cfn-push-stats
- # options (e.g. --haproxy) try to push multiple metrics when we
- # actually only care about one (the one we're alarming on),
- # so just ignore any data which doesn't contain MetricName.
- LOG.debug('Ignoring metric data (only accept %(metric)s) '
- ': %(data)s' % {'metric': self.rule['MetricName'],
- 'data': data})
- return
-
- watch_data = {
- 'data': data,
- 'watch_rule_id': self.id
- }
- wd = watch_data_objects.WatchData.create(self.context, watch_data)
- LOG.debug('new watch:%(name)s data:%(data)s'
- % {'name': self.name, 'data': str(wd.data)})
-
- def state_set(self, state):
- """Persistently store the watch state."""
- if state not in self.WATCH_STATES:
- raise ValueError(_("Invalid watch state %s") % state)
-
- self.state = state
- self.store()
-
- def set_watch_state(self, state):
- """Temporarily set the watch state.
-
- :returns: list of functions to be scheduled in the stack ThreadGroup
- for the specified state.
- """
-
- if state not in self.WATCH_STATES:
- raise ValueError(_('Unknown watch state %s') % state)
-
- actions = []
- if state != self.state:
- actions = self.rule_actions(state)
- if actions:
- LOG.debug("Overriding state %(self_state)s for watch "
- "%(name)s with %(state)s"
- % {'self_state': self.state, 'name': self.name,
- 'state': state})
- else:
- LOG.warning("Unable to override state %(state)s for "
- "watch %(name)s", {'state': self.state,
- 'name': self.name})
- return actions
-
-
-def rule_can_use_sample(wr, stats_data):
- def match_dimensions(rule, data):
- for k, v in iter(rule.items()):
- if k not in data:
- return False
- elif v != data[k]:
- return False
- return True
-
- if wr.state == WatchRule.SUSPENDED:
- return False
- if wr.state == WatchRule.CEILOMETER_CONTROLLED:
- metric = wr.rule['meter_name']
- rule_dims = {}
- for k, v in iter(wr.rule.get('matching_metadata', {}).items()):
- name = k.split('.')[-1]
- rule_dims[name] = v
- else:
- metric = wr.rule['MetricName']
- rule_dims = dict((d['Name'], d['Value'])
- for d in wr.rule.get('Dimensions', []))
-
- if metric not in stats_data:
- return False
-
- for k, v in iter(stats_data.items()):
- if k == 'Namespace':
- continue
- if k == metric:
- data_dims = v.get('Dimensions', {})
- if isinstance(data_dims, list):
- data_dims = data_dims[0]
- if match_dimensions(rule_dims, data_dims):
- return True
- return False
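The block of `do_*` statistic handlers and `rule_can_use_sample()` removed above formed the core of the old evaluation loop: keep only the samples inside the Period window, reduce them with the configured Statistic, then compare the result against Threshold using ComparisonOperator. The standalone sketch below condenses that flow for reference; it is an editorial illustration, not code from the Heat tree, and the rule keys (Statistic, ComparisonOperator, Threshold, Period) simply follow the CloudWatch-style schema of the removed code.

import datetime
import operator

# Mirrors do_data_cmp(): operator-module functions replace the if/elif chain.
COMPARISONS = {
    'GreaterThanThreshold': operator.gt,
    'GreaterThanOrEqualToThreshold': operator.ge,
    'LessThanThreshold': operator.lt,
    'LessThanOrEqualToThreshold': operator.le,
}

# Mirrors do_Maximum/do_Minimum/do_Average/do_Sum/do_SampleCount.
STATISTICS = {
    'Maximum': max,
    'Minimum': min,
    'Average': lambda values: sum(values) / len(values),
    'Sum': sum,
    'SampleCount': len,
}

def alarm_state(rule, samples, now):
    """Evaluate a CloudWatch-style rule over (created_at, value) samples.

    Returns 'ALARM', 'NORMAL' or 'NODATA', the same states the removed
    WatchRule reported.
    """
    period = datetime.timedelta(seconds=int(rule['Period']))
    window = [value for created_at, value in samples
              if created_at >= now - period]
    # SampleCount is defined for an empty window (it counts zero samples);
    # the other statistics have no data to reduce, as in the removed code.
    if not window and rule['Statistic'] != 'SampleCount':
        return 'NODATA'
    data = STATISTICS[rule['Statistic']](window)
    compare = COMPARISONS[rule['ComparisonOperator']]
    return 'ALARM' if compare(data, float(rule['Threshold'])) else 'NORMAL'

For example, a rule of {'Statistic': 'Average', 'ComparisonOperator': 'GreaterThanThreshold', 'Threshold': '50', 'Period': '60'} evaluated over two fresh samples with values 80.0 and 40.0 averages to 60.0 and returns 'ALARM'.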
diff --git a/heat/httpd/files/heat-api-cloudwatch-uwsgi.ini b/heat/httpd/files/heat-api-cloudwatch-uwsgi.ini
deleted file mode 100644
index 1726e667a..000000000
--- a/heat/httpd/files/heat-api-cloudwatch-uwsgi.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[uwsgi]
-chmod-socket = 666
-lazy-apps = true
-add-header = Connection: close
-buffer-size = 65535
-thunder-lock = true
-plugins = python
-enable-threads = true
-exit-on-reload = true
-die-on-term = true
-master = true
-processes = 4
-http = 127.0.0.1:80997
-wsgi-file = /usr/local/bin/heat-wsgi-api-cloudwatch
diff --git a/heat/httpd/files/heat-api-cloudwatch.conf b/heat/httpd/files/heat-api-cloudwatch.conf
deleted file mode 100644
index c86d9ee45..000000000
--- a/heat/httpd/files/heat-api-cloudwatch.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
- WSGIDaemonProcess heat-api-cloudwatch processes=%API_WORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup heat-api-cloudwatch
- WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- AllowEncodedSlashes On
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/%APACHE_NAME%/heat_api_cloudwatch.log
- CustomLog /var/log/%APACHE_NAME%/heat_api_cloudwatch_access.log combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-
- <Directory %HEAT_BIN_DIR%>
- <IfVersion >= 2.4>
- Require all granted
- </IfVersion>
- <IfVersion < 2.4>
- Order allow,deny
- Allow from all
- </IfVersion>
- </Directory>
-</VirtualHost>
diff --git a/heat/httpd/files/uwsgi-heat-api-cloudwatch.conf b/heat/httpd/files/uwsgi-heat-api-cloudwatch.conf
deleted file mode 100644
index bc1789708..000000000
--- a/heat/httpd/files/uwsgi-heat-api-cloudwatch.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-KeepAlive Off
-ProxyPass "/heat-api-cloudwatch" "http://127.0.0.1:80997" retry=0
diff --git a/heat/httpd/heat_api_cloudwatch.py b/heat/httpd/heat_api_cloudwatch.py
deleted file mode 100644
index 646545e58..000000000
--- a/heat/httpd/heat_api_cloudwatch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WSGI script for heat-api-cloudwatch.
-
-Script for running heat-api-cloudwatch under Apache2.
-"""
-
-
-from oslo_config import cfg
-import oslo_i18n as i18n
-from oslo_log import log as logging
-
-from heat.common import config
-from heat.common import messaging
-from heat.common import profiler
-from heat import version
-
-
-def init_application():
- i18n.enable_lazy()
-
- LOG = logging.getLogger('heat.api.cloudwatch')
-
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat',
- prog='heat-api-cloudwatch',
- version=version.version_info.version_string())
- logging.setup(cfg.CONF, 'heat-api-cloudwatch')
- logging.set_defaults()
- config.set_config_defaults()
- messaging.setup()
-
- port = cfg.CONF.heat_api_cloudwatch.bind_port
- host = cfg.CONF.heat_api_cloudwatch.bind_host
- LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
- {'host': host, 'port': port})
- profiler.setup('heat-api-cloudwatch', host)
-
- return config.load_paste_app()
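The removed module above exposed init_application() as the WSGI entry point consumed by the uwsgi and mod_wsgi files deleted earlier in this change. As a rough, self-contained illustration of how such a factory is exercised outside Apache, here is an editorial sketch using only the standard library; the trivial application body and the port are placeholders, not Heat behavior (8003 was merely the conventional heat-api-cloudwatch port).

from wsgiref.simple_server import make_server

def init_application():
    """Stand-in factory; the real one set up oslo config/logging/messaging
    and returned the paste-deploy pipeline via config.load_paste_app()."""
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'heat-api-cloudwatch placeholder\n']
    return app

if __name__ == '__main__':
    # Placeholder host/port for local testing only.
    with make_server('127.0.0.1', 8003, init_application()) as httpd:
        httpd.serve_forever()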
diff --git a/heat/locale/de/LC_MESSAGES/heat.po b/heat/locale/de/LC_MESSAGES/heat.po
index 8e453c285..adb85afb9 100644
--- a/heat/locale/de/LC_MESSAGES/heat.po
+++ b/heat/locale/de/LC_MESSAGES/heat.po
@@ -9,9 +9,9 @@
# Robert Simai <robert.simai@suse.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -408,22 +408,6 @@ msgstr ""
"Eine Liste mit Zugriffsregeln, die den Zugriff von IP auf "
"Freigabeverzeichnis definieren."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr ""
-"Eine Liste der Aktionen, die auszuführen sind, wenn der Status in 'alarm' "
-"übergeht. "
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"Eine Liste der Aktionen, die auszuführen sind, wenn der Status in "
-"'insufficient-data' übergeht. "
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr ""
-"Eine Liste der Aktionen, die auszuführen sind, wenn der Status in 'ok' "
-"übergeht. "
-
msgid "A list of all rules for the QoS policy."
msgstr "Eine Liste aller Regeln für die QoS-Richtlinie."
@@ -450,12 +434,6 @@ msgstr "Eine Liste der Clusterinstanz-IPs."
msgid "A list of clusters to which this policy is attached."
msgstr "Eine Liste der Cluster, der diese Richtlinie zugeordnet wurde."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"Eine Liste der Dimensionen (beliebige Name/Wert-Paare), die dem Messwert "
-"zugeordnet sind. "
-
msgid "A list of host route dictionaries for the subnet."
msgstr "Eine Liste der Host-Routenverzeichnisse für das Subnetz."
@@ -592,9 +570,6 @@ msgstr ""
"Eine signierte URL zum Erstellen von Ausführungen für Arbeitsabläufe, die in "
"der Arbeitsablaufressource angegeben sind."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "Eine signierte URL zum Verarbeiten des Alarms (Heat-Erweiterung)."
-
msgid "A signed url to handle the alarm."
msgstr "Eine signierte URL zum Verarbeiten des Alarms."
@@ -666,9 +641,6 @@ msgstr "AccessPolicy-Ressource %s befindet sich nicht im Stack"
msgid "Action %s not allowed for user"
msgstr "Aktion %s nicht erlaubt für Benutzer"
-msgid "Action for the RBAC policy."
-msgstr "Aktion für die RBAC-Richtlinie."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr ""
"Auf den mit der Regel übereinstimmenden Datenverkehr anzuwendende Aktion."
@@ -983,10 +955,6 @@ msgstr ""
"nicht vorhanden ist"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Versuch, folgende watch_rule zu löschen: %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Versuch, Stack mit der folgenden ID zu aktualisieren: %(id)s %(msg)s"
@@ -996,11 +964,6 @@ msgstr ""
"Versuch, Stack mit der folgenden ID zu aktualisieren: %(id)s %(traversal)s "
"%(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr ""
-"Versuch, Ãœberwachung mit der folgenden ID zu aktualisieren: %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Versuch, stored_context ohne user_creds zu verwenden"
@@ -2742,9 +2705,6 @@ msgstr ""
msgid "Instance ID to associate with EIP."
msgstr "Instanz-ID, die dieser EIP zuzuordnen ist. "
-msgid "Instance ID to be restarted."
-msgstr "Erneut zu startende Instanz-ID. "
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"Instanzverbindung zu CFN/CW-API-Validierungszertifikaten, wenn SSL verwendet "
@@ -2802,14 +2762,6 @@ msgid "Invalid UUID version (%d)"
msgstr "Ungültige UUID-Version (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"Ungültige Aktion %(action)s für Objekttyp %(obj_type)s. Gültige Aktionen :"
-"%(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Ungültige Aktion %s"
@@ -2896,11 +2848,6 @@ msgstr ""
"Container- und Plattenformate übereinstimmen."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr ""
-"Ungültiger 'object_type': %(obj_type)s. Gültiger 'object_type': %(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr ""
"Ungültige Parameterbedingungen für Parameter %s, erwartet wurde eine Liste"
@@ -2931,10 +2878,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "Ungültiger Stackname %s. Muss eine Zeichenfolge sein."
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "Ungültiger Status %(state)s, erwartet wird einer der Status %(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Ungültiger Status %s"
@@ -2974,10 +2917,6 @@ msgstr "Ungültige Zeitzone: %s"
msgid "Invalid type (%s)"
msgstr "Ungültiger Typ (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Ungültiger Beobachtungsstatus %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "IP-Zuordnungspools und die zugehörigen Bereiche. "
@@ -3467,9 +3406,6 @@ msgstr "Implementierungsmethode der Funktion für die Sitzungspersistenz. "
msgid "Metric name watched by the alarm."
msgstr "Vom Alarm überwachter Messwertname. "
-msgid "Metric statistic to evaluate."
-msgstr "Auszuwertende Messdatenstatistik. "
-
msgid "Min size of the cluster."
msgstr "Mindestgröße des Clusters."
@@ -3919,9 +3855,6 @@ msgstr ""
"Namen der Datenbanken, auf die diese Benutzer bei der Instanzerstellung "
"zugreifen können. "
-msgid "Namespace for the metric."
-msgstr "Namensbereich für den Messwert. "
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -4168,11 +4101,6 @@ msgstr ""
"Operator, der zum Vergleichen der angegebenen Statistik mit dem "
"Schwellenwert verwendet wird. "
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr ""
-"Operator, der zum Vergleichen der angegebenen Statistik mit dem "
-"Schwellenwert verwendet wird. "
-
msgid "Optional CA cert file to use in SSL connections."
msgstr ""
"Optionale Zertifizierungsdatei der Zertifizierungsstelle, die in SSL-"
@@ -5687,10 +5615,6 @@ msgstr "Der VIP-Port des Loadbalancers."
msgid "The VIP subnet of the LoadBalancer."
msgstr "Das VIP-Subnetz des Loadbalancers."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "Die Ãœberwachungsregel (%(watch_name)s) konnte nicht gefunden werden."
-
msgid "The action or operation requested is invalid"
msgstr "Die angeforderte Aktion oder der angeforderte Vorgang ist ungültig"
@@ -6000,10 +5924,6 @@ msgstr ""
"nicht aktualisiert werden: %(keys)s"
#, python-format
-msgid "The following resource types could not be found: %s"
-msgstr "Die folgenden Ressourcetypen konnten nicht gefunden werden: %s"
-
-#, python-format
msgid "The function \"%s\" is invalid in this context"
msgstr "Die Funktion \"%s\" is in diesem Zusammenhang ungültig"
@@ -7208,9 +7128,6 @@ msgid "Unique identifier of the firewall policy used to create the firewall."
msgstr ""
"Eindeutige ID der zum Erstellen der Firewall verwendeten Firewallrichtlinie."
-msgid "Unit for the metric."
-msgstr "Einheit für den Messwert. "
-
msgid "Unknown"
msgstr "Unbekannt"
@@ -7255,10 +7172,6 @@ msgid "Unknown status: %s"
msgstr "Unbekannter Status: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "Unbekannter Beobachtungsstatus %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -7408,9 +7321,6 @@ msgstr "Wert '%s' ist keine Ganzzahl"
msgid "Value must be a comma-delimited list string: %s"
msgstr "Der Wert muss eine durch Kommas begrenzte Zeichenfolge sein: %s"
-msgid "Value must be a string"
-msgstr "Wert muss eine Zeichenkette sein"
-
#, python-format
msgid "Value must be of type %s"
msgstr "Wert muss vom Typ %s sein"
diff --git a/heat/locale/es/LC_MESSAGES/heat.po b/heat/locale/es/LC_MESSAGES/heat.po
index 6ae1d75b8..57a22d271 100644
--- a/heat/locale/es/LC_MESSAGES/heat.po
+++ b/heat/locale/es/LC_MESSAGES/heat.po
@@ -8,9 +8,9 @@
# Omar Rivera <gr113x@att.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -395,17 +395,6 @@ msgstr ""
"Una lista de reglas de acceso que definen el acceso desde la IP al recurso "
"compartido."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "Una lista de acciones a ejecutar cuando el estado pasa a alarma."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"Una lista de acciones a ejecutar cuando el estado pasa a datos insuficientes."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "Una lista de acciones a ejecutar cuando el estado pasa a correcto."
-
msgid "A list of all rules for the QoS policy."
msgstr "Lista de todas las reglas de de la política de QoS."
@@ -432,12 +421,6 @@ msgstr "Una lista de ID de instancias de clúster.."
msgid "A list of clusters to which this policy is attached."
msgstr "Una lista de clústeres a los que estña conectada esta política."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"Una lista de dimensiones (par de nombre/valor arbitrario) asociado con la "
-"medida."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "Una lista de diccionarios de ruta de host para la subred."
@@ -574,9 +557,6 @@ msgstr ""
"Un URL firmado para crear ejecuciones para flujos de trabajo especificados "
"en el recurso Flujo de trabajo."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "Url firmado para manejar la alarma (extensión de Heat)."
-
msgid "A signed url to handle the alarm."
msgstr "Un url firmado para manejar la alarma."
@@ -645,9 +625,6 @@ msgstr "El recurso AccessPolicy %s no está en la pila"
msgid "Action %s not allowed for user"
msgstr "Acción %s no esta permitida por el usuario"
-msgid "Action for the RBAC policy."
-msgstr "Acción para la política RBAC."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "Acción que se debe realizar en el tráfico que coincide con la regla."
@@ -935,10 +912,6 @@ msgstr ""
"existe"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Intento de suprimir watch_rule: %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Intento de actualizar una pila con el id: %(id)s %(msg)s"
@@ -946,10 +919,6 @@ msgstr "Intento de actualizar una pila con el id: %(id)s %(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "Intento de actualizar una pila con el id: %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "Intento de actualizar una vigilancia con el id: %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Intento de utilizar stored_context sin user_creds"
@@ -2656,9 +2625,6 @@ msgstr "ID de instancia a asociar con EIP especificado por la propiedad EIP."
msgid "Instance ID to associate with EIP."
msgstr "ID de instancia a asociar con EIP."
-msgid "Instance ID to be restarted."
-msgstr "ID de instancia que se debe reiniciar."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"Conexión de instancia con la API CFN/CW, validar certificados si se utiliza "
@@ -2716,14 +2682,6 @@ msgid "Invalid UUID version (%d)"
msgstr "Version UUID inválida (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"Acción no válida %(action)s para el tipo de objeto%(obj_type)s. Acciones "
-"válidas: %(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Acción inválida %s"
@@ -2793,12 +2751,6 @@ msgstr ""
"y de disco deben coincidir."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr ""
-"Tipo de objeto (object_type) no válido: %(obj_type)s. Tipos de objeto "
-"(object_type) válidos: %(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr ""
"Restricciones de parámetro no válidas para el parámetro %s, se esperaba una "
@@ -2830,10 +2782,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "El nombre de pila no válido \"%s\" debe ser una cadena"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "Estado inválido %(state)s, esperando uno de %(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Estado no válido %s"
@@ -2874,10 +2822,6 @@ msgstr "Zona horaria no válida: %s"
msgid "Invalid type (%s)"
msgstr "Tipo inválido (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Estado de observador no válido %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "Pool de asignación IP y sus rangos."
@@ -3368,9 +3312,6 @@ msgstr ""
msgid "Metric name watched by the alarm."
msgstr "Nombre de medida observado por la alarma."
-msgid "Metric statistic to evaluate."
-msgstr "Estadística de medida a evaluar."
-
msgid "Min size of the cluster."
msgstr "Tamaño mínimo del clúster."
@@ -3798,9 +3739,6 @@ msgstr ""
"Nombres de las bases de datos a las que esos usuarios pueden acceder en la "
"creación de instancias."
-msgid "Namespace for the metric."
-msgstr "Espacio de nombres para la medida."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -4045,10 +3983,6 @@ msgstr "'%(name)s' sólo acepta un entero que no sea cero."
msgid "Operator used to compare specified statistic with threshold."
msgstr "Operador utilizado para comparar estadísticas específicas con umbral."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr ""
-"Operador utilizado para comparar la estadística especificada con umbral."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "Certificado CA opcional a utilizar en conexiónes SSL."
@@ -5541,10 +5475,6 @@ msgstr "El puerto VIP del equilibrador de carga."
msgid "The VIP subnet of the LoadBalancer."
msgstr "La subred VIP del equilibrador de carga."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "La Regla de Observación (%(watch_name)s) no se ha podido encontrar."
-
msgid "The action or operation requested is invalid"
msgstr "La acción o operación solicitada es invalída"
@@ -7009,9 +6939,6 @@ msgstr ""
"Identificador exclusivo de la política de cortafuegos utilizada para crear "
"el cortafuegos."
-msgid "Unit for the metric."
-msgstr "Unidad para la medida."
-
msgid "Unknown"
msgstr "Desconocido"
@@ -7058,10 +6985,6 @@ msgid "Unknown status: %s"
msgstr "Estado desconocido: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "Estado de observador desconocido %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -7210,9 +7133,6 @@ msgstr "El valor '%s' no es un entero"
msgid "Value must be a comma-delimited list string: %s"
msgstr "El valor debe ser una serie de lista delimitada por comas: %s"
-msgid "Value must be a string"
-msgstr "El valor debe ser una serie"
-
#, python-format
msgid "Value must be of type %s"
msgstr "El valor debe ser de tipo %s"
diff --git a/heat/locale/fr/LC_MESSAGES/heat.po b/heat/locale/fr/LC_MESSAGES/heat.po
index 4fa337cdb..9c7b6cb03 100644
--- a/heat/locale/fr/LC_MESSAGES/heat.po
+++ b/heat/locale/fr/LC_MESSAGES/heat.po
@@ -8,9 +8,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -393,17 +393,6 @@ msgid "A list of access rules that define access from IP to Share."
msgstr ""
"Liste de règles d'accès définissant l'accès au partage depuis une adresse IP."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "Une liste d'actions à exécuter lorsque l'état passe à alarme."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"Une liste d'actions à exécuter lorsque l'état passe à insufficient-data."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "Une liste d'actions à exécuter lorsque l'état passe à ok."
-
msgid "A list of all rules for the QoS policy."
msgstr "Liste de toutes les règles pour la stratégie de qualité de service."
@@ -430,12 +419,6 @@ msgstr "Liste des IP d'instance de cluster."
msgid "A list of clusters to which this policy is attached."
msgstr "Liste des clusters auxquels cette stratégie est connectée."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"Une liste de dimensions (paires nom/valeur arbitraires) associées à la "
-"mesure."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "Liste des dictionnaires de route hôte pour le sous-réseau."
@@ -572,9 +555,6 @@ msgstr ""
"URL signée permettant de créer des exécutions pour les flux de travail "
"spécifiés dans la ressource Workflow."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "Une URL signée pour traiter l'alarme (Extension de Heat)."
-
msgid "A signed url to handle the alarm."
msgstr "Une URL signée pour traiter l'alarme."
@@ -643,9 +623,6 @@ msgstr "La ressource AccessPolicy %s n'est pas dans la pile"
msgid "Action %s not allowed for user"
msgstr "L'action %s n'est pas autorisé pour l'utilisateur"
-msgid "Action for the RBAC policy."
-msgstr "Action pour la stratégie RBAC."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "Action à effectuer sur le trafic appartenant à la règle."
@@ -929,10 +906,6 @@ msgstr ""
"%(id)s qui n'existe pas"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Tentative de suppression de watch_rule : %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Tentative de mise à jour d'une pile avec l'ID : %(id)s %(msg)s"
@@ -941,10 +914,6 @@ msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr ""
"Tentative de mise à jour d'une pile avec l'ID : %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "Tentative de mise à jour d'une surveillance avec l'ID : %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Tentative d'utilisation de stored_context sans user_creds"
@@ -2643,9 +2612,6 @@ msgstr "ID instance à associer à l'EIP indiqué par la propriété EIP."
msgid "Instance ID to associate with EIP."
msgstr "ID instance à associer à EIP."
-msgid "Instance ID to be restarted."
-msgstr "ID instance à redémarrer."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"La connexion d'instance à l'API CFN/CW valide des certificats si SSL est "
@@ -2703,14 +2669,6 @@ msgid "Invalid UUID version (%d)"
msgstr "Version (%d) UUID invalide"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"Action non valide %(action)s pour le type d'objet %(obj_type)s. Actions "
-"valides : %(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Action non valide %s"
@@ -2782,10 +2740,6 @@ msgstr ""
"formats du disque et du conteneur doivent correspondre."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "object_type non valide : %(obj_type)s. object_type valide : %(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr ""
"Contraintes de paramètre non valides pour le paramètre %s, liste attendue"
@@ -2812,11 +2766,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "Nom de pile %s non valide, doit être une chaîne"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr ""
-"Etat non valide %(state)s, l'un des états suivants %(expect)s est attendu"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Status non valide %s"
@@ -2857,10 +2806,6 @@ msgstr "Fuseau horaire non valide : %s"
msgid "Invalid type (%s)"
msgstr "Type invalide (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Etat de surveillance %s non valide"
-
msgid "Ip allocation pools and their ranges."
msgstr "Pools d'allocation d'IP et leurs plages."
@@ -3346,9 +3291,6 @@ msgstr "Méthode d'implémentation de la fonction de persistance de session."
msgid "Metric name watched by the alarm."
msgstr "Nom d'indicateur surveillé par l'alarme."
-msgid "Metric statistic to evaluate."
-msgstr "Statistique de mesure à évaluer."
-
msgid "Min size of the cluster."
msgstr "Taille minimale du cluster."
@@ -3767,9 +3709,6 @@ msgstr ""
"Noms des bases de données auxquelles ces utilisateurs peuvent accéder lors "
"de la création d'instance."
-msgid "Namespace for the metric."
-msgstr "Espace de nom pour la mesure."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -4015,10 +3954,6 @@ msgid "Operator used to compare specified statistic with threshold."
msgstr ""
"Opérateur utilisé pour comparer des statistiques spécifiées avec le seuil."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr ""
-"Opérateur utilisé pour comparer les statistiques spécifiées avec le seuil."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr ""
"Fichier de certificat de l'autorité de certification facultatif à utiliser "
@@ -5504,10 +5439,6 @@ msgstr "Port VIP de l'équilibreur de charge."
msgid "The VIP subnet of the LoadBalancer."
msgstr "Sous-réseau VIP de l'équilibreur de charge."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "La règle de surveillance (%(watch_name)s) est introuvable."
-
msgid "The action or operation requested is invalid"
msgstr "L'action ou l’opération demandée est non valide"
@@ -6950,9 +6881,6 @@ msgstr ""
"Identificateur unique de la stratégie de pare-feu utilisée pour créer le "
"pare-feu."
-msgid "Unit for the metric."
-msgstr "Unité pour la mesure."
-
msgid "Unknown"
msgstr "Inconnu"
@@ -6996,10 +6924,6 @@ msgid "Unknown status: %s"
msgstr "Status inconnu: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "Etat de surveillance %s inconnu"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -7152,9 +7076,6 @@ msgid "Value must be a comma-delimited list string: %s"
msgstr ""
"La valeur doit être une chaîne de liste délimitée par des virgules : %s"
-msgid "Value must be a string"
-msgstr "La valeur doit être une chaine de caractère "
-
#, python-format
msgid "Value must be of type %s"
msgstr "La valeur doit être de type %s"
diff --git a/heat/locale/it/LC_MESSAGES/heat.po b/heat/locale/it/LC_MESSAGES/heat.po
index af6c8fec2..65b375072 100644
--- a/heat/locale/it/LC_MESSAGES/heat.po
+++ b/heat/locale/it/LC_MESSAGES/heat.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -392,19 +392,6 @@ msgid "A list of access rules that define access from IP to Share."
msgstr ""
"Un elenco di regole di accesso che definisce l'accesso da IP a Condivisione."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr ""
-"Un elenco di azioni da eseguire quando lo stato cambia in segnalazione."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"Un elenco di azioni da eseguire quando lo stato cambia in dati non "
-"sufficienti."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "Un elenco di azioni da eseguire quando lo stato cambia in ok."
-
msgid "A list of all rules for the QoS policy."
msgstr "Un elenco di regole per la politica QoS."
@@ -431,12 +418,6 @@ msgstr "Un elenco di IP istanze del cluster."
msgid "A list of clusters to which this policy is attached."
msgstr "Un elenco di cluster a cui è collegata questa politica."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"Un elenco di dimensioni (nome arbitrario/coppie di valori) associate alla "
-"metrica."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "Un elenco di dizionari di instradamenti host per la sottorete."
@@ -572,9 +553,6 @@ msgstr ""
"Un URL firmato per creare esecuzioni per i flussi di lavoro specificati "
"nella risorsa Workflow."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "Un url firmato per gestire la segnalazione (estensione heat)."
-
msgid "A signed url to handle the alarm."
msgstr "Url firmato per gestire la segnalazione."
@@ -643,9 +621,6 @@ msgstr "La risorsa AccessPolicy %s non presente nello stack"
msgid "Action %s not allowed for user"
msgstr "Azione %s non consentita per l'utente"
-msgid "Action for the RBAC policy."
-msgstr "Azione per la politica RBAC."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "Azione da eseguire sul traffico corrispondente alla regola."
@@ -931,10 +906,6 @@ msgstr ""
"Tentativo di eliminazione dei crediti utente con l'id %(id)s che non esiste"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Tentativo di eliminare una regola watch (watch_rule): %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Tentativo di aggiornare uno stack con id: %(id)s %(msg)s"
@@ -942,10 +913,6 @@ msgstr "Tentativo di aggiornare uno stack con id: %(id)s %(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "Tentativo di aggiornare uno stack con id: %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "Tentativo di aggiornare una regola watch con id: %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Tentativo di utilizzare stored_context senza nessun user_creds"
@@ -2635,9 +2602,6 @@ msgstr "ID istanza da associare a EIP specificata dalla proprietà EIP."
msgid "Instance ID to associate with EIP."
msgstr "ID istanza da associare a EIP."
-msgid "Instance ID to be restarted."
-msgstr "ID istanza da riavviare."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"Connessione dell'istanza ai certificati di convalida dell'API CFN/CW se si "
@@ -2695,14 +2659,6 @@ msgid "Invalid UUID version (%d)"
msgstr "Versione UUID non valida (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"Azione non valida '%(action)s' per tipo di oggetto '%(obj_type)s'. Azioni "
-"valide: %(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Azione non valida %s"
@@ -2772,10 +2728,6 @@ msgstr ""
"formati contenitore e disco devono corrispondere."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "object_type non valido: %(obj_type)s. object_type validi: %(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr ""
"Vincoli del parametro per il parametro %s, non validi, previsto un elenco"
@@ -2802,10 +2754,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "Il nome stack non valido %s deve essere una stringa"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "Stato non valido %(state)s, si prevede uno di %(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Stato non valido %s"
@@ -2846,10 +2794,6 @@ msgstr "Fuso orario non valido: %s"
msgid "Invalid type (%s)"
msgstr "Tipo non valido (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Stato watch non valido %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "Pool di allocazione Ip ed i relativi intervalli."
@@ -3331,9 +3275,6 @@ msgstr ""
msgid "Metric name watched by the alarm."
msgstr "Nome della metrica osservata dalla segnalazione."
-msgid "Metric statistic to evaluate."
-msgstr "Statistica delle metriche da valutare."
-
msgid "Min size of the cluster."
msgstr "Dimensione minima del cluster."
@@ -3759,9 +3700,6 @@ msgstr ""
"I nomi dei database che quegli utenti possono accedere durante la creazione "
"dell'istanza."
-msgid "Namespace for the metric."
-msgstr "Spazio dei nomi per la metrica."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -4002,10 +3940,6 @@ msgid "Operator used to compare specified statistic with threshold."
msgstr ""
"Operatore utilizzato per confrontare la statistica specificata con la soglia."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr ""
-"Operatore utilizzato per confrontare la statistica specificata con la soglia."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "File certificato CA facoltativo da utilizzare nelle connessioni SSL."
@@ -5472,10 +5406,6 @@ msgstr "La porta VIP del bilanciatore del carico."
msgid "The VIP subnet of the LoadBalancer."
msgstr "La sottorete VIP del bilanciatore del carico."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "Impossibile trovare la regola watch (%(watch_name)s)."
-
msgid "The action or operation requested is invalid"
msgstr "L'azione o l'operazione richiesta non è valida"
@@ -6932,9 +6862,6 @@ msgstr ""
"Identificativo univoco della politica firewall utilizzato per creare il "
"firewall."
-msgid "Unit for the metric."
-msgstr "Unità per la metrica."
-
msgid "Unknown"
msgstr "Sconosciuto"
@@ -6982,10 +6909,6 @@ msgid "Unknown status: %s"
msgstr "Stato sconosciuto: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "Stato watch sconosciuto %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -7135,9 +7058,6 @@ msgstr "Il valore '%s' non è un numero intero"
msgid "Value must be a comma-delimited list string: %s"
msgstr "Il valore deve essere una stringa di elenco delimitata da virgole: %s"
-msgid "Value must be a string"
-msgstr "Il valore deve essere una stringa"
-
#, python-format
msgid "Value must be of type %s"
msgstr "Il valore deve essere di tipo %s"
diff --git a/heat/locale/ja/LC_MESSAGES/heat.po b/heat/locale/ja/LC_MESSAGES/heat.po
index b902a347a..cbc30b5a0 100644
--- a/heat/locale/ja/LC_MESSAGES/heat.po
+++ b/heat/locale/ja/LC_MESSAGES/heat.po
@@ -10,9 +10,9 @@
# Yuko Fukuda <fukuda.yuko@jp.fujitsu.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -383,16 +383,6 @@ msgstr "状態㌠OK ã¸é·ç§»ã™ã‚‹éš›ã«å‘¼ã³å‡ºã™ URL (webhook) ã®ãƒªã‚¹ãƒ
msgid "A list of access rules that define access from IP to Share."
msgstr "IP ã‹ã‚‰ã‚·ã‚§ã‚¢ã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’定義ã™ã‚‹ã‚¢ã‚¯ã‚»ã‚¹ãƒ«ãƒ¼ãƒ«ã®ãƒªã‚¹ãƒˆã€‚"
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "状態ãŒã‚¢ãƒ©ãƒ¼ãƒ ã¸é·ç§»ã—ãŸã¨ãã«å®Ÿè¡Œã™ã‚‹ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã®ãƒªã‚¹ãƒˆã€‚"
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr "状態ãŒãƒ‡ãƒ¼ã‚¿ä¸è¶³ã¸é·ç§»ã—ãŸã¨ãã«å®Ÿè¡Œã™ã‚‹ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã®ãƒªã‚¹ãƒˆã€‚"
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "状態㌠ok ã¸é·ç§»ã—ãŸã¨ãã«å®Ÿè¡Œã™ã‚‹ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã®ãƒªã‚¹ãƒˆã€‚"
-
msgid "A list of all rules for the QoS policy."
msgstr "QoS ãƒãƒªã‚·ãƒ¼ã®ã™ã¹ã¦ã®ãƒ«ãƒ¼ãƒ«ã®ãƒªã‚¹ãƒˆã€‚"
@@ -415,11 +405,6 @@ msgstr "クラスターインスタンス㮠IP ã®ãƒªã‚¹ãƒˆã€‚"
msgid "A list of clusters to which this policy is attached."
msgstr "ã“ã®ãƒãƒªã‚·ãƒ¼ãŒè¿½åŠ ã•ã‚Œã‚‹ã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ã®ãƒªã‚¹ãƒˆã€‚"
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"メトリックã«é–¢é€£ä»˜ã‘られã¦ã„るディメンション (ä»»æ„ã®åå‰/値ã®ãƒšã‚¢) ã®ãƒªã‚¹ãƒˆã€‚"
-
msgid "A list of host route dictionaries for the subnet."
msgstr "サブãƒãƒƒãƒˆã®ãƒ›ã‚¹ãƒˆãƒ«ãƒ¼ãƒˆãƒ‡ã‚£ã‚¯ã‚·ãƒ§ãƒŠãƒªãƒ¼ã®ãƒªã‚¹ãƒˆã€‚"
@@ -543,9 +528,6 @@ msgstr ""
"ワークフローã®ãƒªã‚½ãƒ¼ã‚¹ã§æŒ‡å®šã•ã‚ŒãŸãƒ¯ãƒ¼ã‚¯ãƒ•ãƒ­ãƒ¼ã®å‡¦ç†ã‚’作æˆã™ã‚‹ãŸã‚ã®ç½²å済㿠"
"URL。"
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "アラームを処ç†ã™ã‚‹ç½²å済㿠URL (heat æ‹¡å¼µ)。"
-
msgid "A signed url to handle the alarm."
msgstr "アラームを処ç†ã™ã‚‹ç½²å済㿠URL。"
@@ -610,9 +592,6 @@ msgstr "アクセスãƒãƒªã‚·ãƒ¼ãƒªã‚½ãƒ¼ã‚¹ %s ãŒã‚¹ã‚¿ãƒƒã‚¯ã«ã‚ã‚Šã¾ã›ã‚
msgid "Action %s not allowed for user"
msgstr "アクション %s ã¯ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "Action for the RBAC policy."
-msgstr "RBAC ãƒãƒªã‚·ãƒ¼ã®ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã€‚"
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "ルールã«ä¸€è‡´ã™ã‚‹ãƒˆãƒ©ãƒ•ã‚£ãƒƒã‚¯ã«å¯¾ã—ã¦å®Ÿè¡Œã™ã‚‹ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã€‚"
@@ -888,10 +867,6 @@ msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr "ID ㌠%(id)s ã®å­˜åœ¨ã—ãªã„ユーザー資格情報を削除ã—よã†ã¨ã—ã¦ã„ã¾ã™"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "watch_rule: %(id)s ã®å‰Šé™¤ã‚’試行: %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "ID %(id)s ã®ã‚¹ã‚¿ãƒƒã‚¯ã®æ›´æ–°ã‚’試行: %(msg)s"
@@ -899,10 +874,6 @@ msgstr "ID %(id)s ã®ã‚¹ã‚¿ãƒƒã‚¯ã®æ›´æ–°ã‚’試行: %(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "%(id)s %(traversal)s %(msg)s ã® ID ã§ã‚¹ã‚¿ãƒƒã‚¯ã®æ›´æ–°ã‚’試ã¿ã¾ã™ã€‚"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "ID %(id)s ã®ç›£è¦–ã®æ›´æ–°ã‚’試行: %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "user_creds ãªã—㧠stored_context を使用ã—よã†ã¨ã—ã¦ã„ã¾ã™"
@@ -2571,9 +2542,6 @@ msgstr "EIP プロパティーã§æŒ‡å®šã•ã‚ŒãŸ EIP ã«é–¢é€£ä»˜ã‘るインス
msgid "Instance ID to associate with EIP."
msgstr "EIP ã«é–¢é€£ä»˜ã‘るインスタンス ID。"
-msgid "Instance ID to be restarted."
-msgstr "å†å§‹å‹•ã™ã‚‹ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ ID。"
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr "SSL ãŒä½¿ç”¨ã•ã‚Œã¦ã„ã‚‹å ´åˆã® CFN/CW API 証明書検証ã¸ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹æŽ¥ç¶šã€‚"
@@ -2629,14 +2597,6 @@ msgid "Invalid UUID version (%d)"
msgstr "UUID ãƒãƒ¼ã‚¸ãƒ§ãƒ³ (%d) ã¯ç„¡åŠ¹ã§ã™"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"オブジェクトタイプ %(obj_type)s ã«é–¢ã™ã‚‹ç„¡åŠ¹ãªã‚¢ã‚¯ã‚·ãƒ§ãƒ³ %(action)s。有効ãªã‚¢"
-"クション: %(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "アクション %s ã¯ç„¡åŠ¹ã§ã™"
@@ -2719,10 +2679,6 @@ msgstr ""
"ナーã¨ãƒ‡ã‚£ã‚¹ã‚¯ã®å½¢å¼ãŒä¸€è‡´ã—ã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "無効㪠object_type: %(obj_type)s。有効㪠object_type :%(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr "パラメーター %s ã®ãƒ‘ラメーター制約ãŒç„¡åŠ¹ã§ã™ã€‚リストãŒå¿…è¦ã§ã™"
@@ -2755,10 +2711,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "無効ãªã‚¹ã‚¿ãƒƒã‚¯å %s ã¯æ–‡å­—列ã§ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "状態 %(state)s ã¯ç„¡åŠ¹ã§ã™ã€‚%(expect)s ã®ã„ãšã‚Œã‹ãŒå¿…è¦ã§ã™"
-
-#, python-format
msgid "Invalid status %s"
msgstr "çŠ¶æ³ %s ã¯ç„¡åŠ¹ã§ã™"
@@ -2800,10 +2752,6 @@ msgstr "無効ãªã‚¿ã‚¤ãƒ ã‚¾ãƒ¼ãƒ³: %s"
msgid "Invalid type (%s)"
msgstr "無効ãªã‚¿ã‚¤ãƒ— (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "監視状態 %s ã¯ç„¡åŠ¹ã§ã™"
-
msgid "Ip allocation pools and their ranges."
msgstr "IP ã®å‰²ã‚Šå½“ã¦ãƒ—ールã¨ãã®ç¯„囲。"
@@ -3263,12 +3211,6 @@ msgstr "セッション永続性機能ã®å®Ÿè£…方法。"
msgid "Metric name watched by the alarm."
msgstr "アラームã«ã‚ˆã£ã¦ç›£è¦–ã•ã‚Œã‚‹ãƒ¡ãƒˆãƒªãƒƒã‚¯å。"
-msgid "Metric statistic to evaluate."
-msgstr "評価ã™ã‚‹ãƒ¡ãƒˆãƒªãƒƒã‚¯çµ±è¨ˆã€‚"
-
-msgid "MetricData list"
-msgstr "MetricData リスト"
-
msgid "Min size of the cluster."
msgstr "クラスターã®æœ€å°ã‚µã‚¤ã‚ºã€‚"
@@ -3675,9 +3617,6 @@ msgid "Names of databases that those users can access on instance creation."
msgstr ""
"インスタンス作æˆæ™‚ã«ã“れらã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒã‚¢ã‚¯ã‚»ã‚¹å¯èƒ½ãªãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã®åå‰ã€‚"
-msgid "Namespace for the metric."
-msgstr "メトリックã®åå‰ç©ºé–“。"
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3910,9 +3849,6 @@ msgstr "'%(name)s' ã§ã¯ã‚¼ãƒ­ä»¥å¤–ã®æ•´æ•°ã®ã¿ãŒå—ã‘入れられã¾ã™ã
msgid "Operator used to compare specified statistic with threshold."
msgstr "指定ã•ã‚ŒãŸçµ±è¨ˆã‚’ã—ãã„値ã¨æ¯”較ã™ã‚‹ãŸã‚ã«ä½¿ç”¨ã™ã‚‹æ¼”ç®—å­ã€‚"
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr "指定ã•ã‚ŒãŸã€Œã—ãã„値をæŒã¤çµ±è¨ˆã€ã‚’比較ã™ã‚‹ãŸã‚ã«ä½¿ç”¨ã™ã‚‹æ¼”ç®—å­ã€‚"
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "SSL 接続ã«ä½¿ç”¨ã™ã‚‹ CA 証明書ファイル。オプション。"
@@ -5369,10 +5305,6 @@ msgstr "ロードãƒãƒ©ãƒ³ã‚µãƒ¼ ã® VIP ãƒãƒ¼ãƒˆã€‚"
msgid "The VIP subnet of the LoadBalancer."
msgstr "ロードãƒãƒ©ãƒ³ã‚µãƒ¼ã® VIP サブãƒãƒƒãƒˆã€‚"
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "監視ルール %(watch_name)s) ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
-
msgid "The action or operation requested is invalid"
msgstr "è¦æ±‚ã•ã‚ŒãŸã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã¾ãŸã¯æ“作ã¯ç„¡åŠ¹ã§ã™"
@@ -6748,9 +6680,6 @@ msgstr "ã“ã®ãƒ•ã‚¡ã‚¤ã‚¢ã‚¦ã‚©ãƒ¼ãƒ«ãƒ«ãƒ¼ãƒ«ãŒå±žã™ã‚‹ãƒ•ã‚¡ã‚¤ã‚¢ã‚¦ã‚©ãƒ¼
msgid "Unique identifier of the firewall policy used to create the firewall."
msgstr "ファイアウォールã®ä½œæˆã«ä½¿ç”¨ã™ã‚‹ãƒ•ã‚¡ã‚¤ã‚¢ã‚¦ã‚©ãƒ¼ãƒ«ãƒãƒªã‚·ãƒ¼ã®å›ºæœ‰ ID。"
-msgid "Unit for the metric."
-msgstr "メトリックã®å˜ä½ã€‚"
-
msgid "Unknown"
msgstr "ä¸æ˜Ž"
@@ -6793,10 +6722,6 @@ msgid "Unknown status: %s"
msgstr "ä¸æ˜ŽçŠ¶æ³: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "監視状態 %s ã¯ä¸æ˜Žã§ã™"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -6944,9 +6869,6 @@ msgstr "値 '%s' ã¯æ•´æ•°ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
msgid "Value must be a comma-delimited list string: %s"
msgstr "値ã¯ã‚³ãƒ³ãƒžåŒºåˆ‡ã‚Šä¸€è¦§ã®æ–‡å­—列ã§ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™: %s"
-msgid "Value must be a string"
-msgstr "値ã¯æ–‡å­—列ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
#, python-format
msgid "Value must be of type %s"
msgstr "値ã®ã‚¿ã‚¤ãƒ—㯠%s ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
diff --git a/heat/locale/ko_KR/LC_MESSAGES/heat.po b/heat/locale/ko_KR/LC_MESSAGES/heat.po
index 49fd79dda..004040df6 100644
--- a/heat/locale/ko_KR/LC_MESSAGES/heat.po
+++ b/heat/locale/ko_KR/LC_MESSAGES/heat.po
@@ -9,18 +9,18 @@
# minwook-shin <minwook0106@gmail.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-08-05 01:52+0000\n"
"Last-Translator: minwook-shin <minwook0106@gmail.com>\n"
-"Language: ko-KR\n"
+"Language: ko_KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Korean (South Korea)\n"
#, python-format
@@ -379,16 +379,6 @@ msgstr "확ì¸ìœ¼ë¡œ ìƒíƒœ ì „ì´ ì‹œ 호출할 URLì˜ ëª©ë¡(웹 후í¬)입니
msgid "A list of access rules that define access from IP to Share."
msgstr "IPì—ì„œ ê³µìœ ë¡œì˜ ì•¡ì„¸ìŠ¤ë¥¼ ì •ì˜í•˜ëŠ” 액세스 규칙 목ë¡ìž…니다."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "알람으로 ìƒíƒœ ì „ì´ ì‹œ 실행할 조치 목ë¡ìž…니다."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr "충분하지 ì•Šì€ ë°ì´í„°ë¡œ ìƒíƒœ ì „ì´ ì‹œ 실행할 조치 목ë¡ìž…니다."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "확ì¸ìœ¼ë¡œ ìƒíƒœ ì „ì´ ì‹œ 실행할 조치 목ë¡ìž…니다."
-
msgid "A list of all rules for the QoS policy."
msgstr "QoS ì •ì±…ì˜ ëª¨ë“  규칙 목ë¡ìž…니다."
@@ -411,10 +401,6 @@ msgstr "í´ëŸ¬ìŠ¤í„° ì¸ìŠ¤í„´ìŠ¤ IP 목ë¡ìž…니다."
msgid "A list of clusters to which this policy is attached."
msgstr "ì´ ì •ì±…ì´ ì—°ê²°ë  í´ëŸ¬ìŠ¤í„° 목ë¡ìž…니다."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr "메트릭과 ì—°ê´€ëœ ì°¨ì› ëª©ë¡ìž…니다(ìž„ì˜ì˜ ì´ë¦„/ê°’ ìŒ)입니다."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "ì„œë¸Œë„·ì˜ í˜¸ìŠ¤íŠ¸ 경로 사전 목ë¡ìž…니다."
@@ -532,9 +518,6 @@ msgid ""
msgstr ""
"워í¬í”Œë¡œìš° ìžì›ì— ì§€ì •ëœ ì›Œí¬í”Œë¡œìš°ì˜ ì‹¤í–‰ì„ ìž‘ì„±í•˜ëŠ” ì„œëª…ëœ url입니다."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "ì•ŒëžŒì„ ì²˜ë¦¬í•  ì„œëª…ëœ URL입니다(히트 확장)."
-
msgid "A signed url to handle the alarm."
msgstr "ì•ŒëžŒì„ ì²˜ë¦¬í•  ì„œëª…ëœ URL입니다."
@@ -598,9 +581,6 @@ msgstr "AccessPolicy ìžì› %sì´(ê°€) 스íƒì— ì—†ìŒ"
msgid "Action %s not allowed for user"
msgstr "사용ìžì—게 조치 %sì´(ê°€) 허용ë˜ì§€ ì•ŠìŒ"
-msgid "Action for the RBAC policy."
-msgstr "RBAC ì •ì±…ì˜ ìž‘ì—…ìž…ë‹ˆë‹¤."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "규칙과 ì¼ì¹˜í•˜ëŠ” íŠ¸ëž˜í”½ì— ëŒ€í•´ 수행할 조치입니다."
@@ -857,10 +837,6 @@ msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr "존재하지 않는 IDê°€ %(id)sì¸ ì‚¬ìš©ìž ì‹ ìž„ 정보를 삭제하려고 ì‹œë„"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "watch_ruleì„ ì‚­ì œí•˜ë ¤ê³  ì‹œë„: %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "IDë¡œ 스íƒì„ ì—…ë°ì´íŠ¸í•˜ë ¤ê³  ì‹œë„: %(id)s %(msg)s"
@@ -868,10 +844,6 @@ msgstr "IDë¡œ 스íƒì„ ì—…ë°ì´íŠ¸í•˜ë ¤ê³  ì‹œë„: %(id)s %(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "IDë¡œ 스íƒì„ ì—…ë°ì´íŠ¸í•˜ë ¤ê³  ì‹œë„: %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "IDë¡œ ê°ì‹œë¥¼ ì—…ë°ì´íŠ¸í•˜ë ¤ê³  ì‹œë„: %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "user_creds ì—†ì´ stored_context를 사용하려고 ì‹œë„í–ˆìŒ"
@@ -2468,9 +2440,6 @@ msgstr "EIP 특성으로 ì§€ì •ëœ EIP와 연관시킬 ì¸ìŠ¤í„´ìŠ¤ ID입니다.
msgid "Instance ID to associate with EIP."
msgstr "EIP와 연관시킬 ì¸ìŠ¤í„´ìŠ¤ ID입니다."
-msgid "Instance ID to be restarted."
-msgstr "다시 시작할 ì¸ìŠ¤í„´ìŠ¤ ID입니다."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"SSLì„ ì‚¬ìš©í•˜ëŠ” 경우 CFN/CW API 유효성 ê²€ì¦ ì¸ì¦ì„œì— 대한 ì¸ìŠ¤í„´ìŠ¤ 연결입니다."
@@ -2527,14 +2496,6 @@ msgid "Invalid UUID version (%d)"
msgstr "올바르지 ì•Šì€ UUID 버전(%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"오브ì íŠ¸ 타입 %(obj_type)sì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ ìž‘ì—… %(action)s. 올바른 ìž‘ì—…: "
-"%(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "올바르지 ì•Šì€ ì¡°ì¹˜ %s"
@@ -2604,10 +2565,6 @@ msgstr ""
"야 합니다."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "올바르지 ì•Šì€ object_type: %(obj_type)s. 올바른 object_type :%(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr "%s ë§¤ê°œë³€ìˆ˜ì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ ë§¤ê°œë³€ìˆ˜ 제한조건. 목ë¡ì´ 예ìƒë¨"
@@ -2632,10 +2589,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "올바르지 ì•Šì€ ìŠ¤íƒ ì´ë¦„ %s, 문ìžì—´ì´ì–´ì•¼ 함"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "올바르지 ì•Šì€ ìƒíƒœ %(state)s. %(expect)s 중 하나 예ìƒ"
-
-#, python-format
msgid "Invalid status %s"
msgstr "올바르지 ì•Šì€ ìƒíƒœ %s"
@@ -2675,10 +2628,6 @@ msgstr "올바르지 ì•Šì€ ì‹œê°„ëŒ€: %s"
msgid "Invalid type (%s)"
msgstr "올바르지 ì•Šì€ ìœ í˜•(%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "올바르지 ì•Šì€ ê°ì‹œ ìƒíƒœ %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "Ip 할당 í’€ ë° í•´ë‹¹ 범위입니다."
@@ -3113,9 +3062,6 @@ msgstr "세션 지ì†ì„± ê¸°ëŠ¥ì˜ êµ¬í˜„ 메소드입니다."
msgid "Metric name watched by the alarm."
msgstr "알람으로 ê°ì‹œí•˜ëŠ” 메트릭 ì´ë¦„입니다."
-msgid "Metric statistic to evaluate."
-msgstr "í‰ê°€í•  메트릭 통계입니다."
-
msgid "Min size of the cluster."
msgstr "í´ëŸ¬ìŠ¤í„°ì˜ 최소 í¬ê¸°ìž…니다."
@@ -3511,9 +3457,6 @@ msgstr "볼륨 íƒ€ìž…ì˜ ì´ë¦„ ë˜ëŠ” id(OS::Cinder::VolumeType)."
msgid "Names of databases that those users can access on instance creation."
msgstr "사용ìžê°€ ì¸ìŠ¤í„´ìŠ¤ 작성 ì‹œ 액세스할 수 있는 ë°ì´í„°ë² ì´ìŠ¤ì˜ ì´ë¦„입니다."
-msgid "Namespace for the metric."
-msgstr "ë©”íŠ¸ë¦­ì˜ ë„¤ìž„ìŠ¤íŽ˜ì´ìŠ¤ìž…니다."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3740,9 +3683,6 @@ msgstr "'%(name)s'ì—서는 0ì´ ì•„ë‹Œ 정수만 허용ë©ë‹ˆë‹¤. "
msgid "Operator used to compare specified statistic with threshold."
msgstr "ì§€ì •ëœ í†µê³„ë¥¼ 임계값과 비êµí•˜ëŠ” ë° ì‚¬ìš©ëœ ì—°ì‚°ìžìž…니다."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr "ì§€ì •ëœ í†µê³„ë¥¼ 임계값과 비êµí•˜ëŠ” ë° ì‚¬ìš©ëœ ì—°ì‚°ìžìž…니다."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "SSL ì—°ê²°ì—ì„œ 사용할 ì„ íƒì  CA ì¸ì¦ 파ì¼ìž…니다."
@@ -5140,10 +5080,6 @@ msgstr "LoadBalancerì˜ VIP í¬íŠ¸ìž…니다."
msgid "The VIP subnet of the LoadBalancer."
msgstr "LoadBalancerì˜ VIP 서브넷입니다."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "ê°ì‹œ 규칙(%(watch_name)s)ì„ ì°¾ì„ ìˆ˜ 없습니다. "
-
msgid "The action or operation requested is invalid"
msgstr "요청한 ë™ìž‘ì´ë‚˜ ìž‘ì—…ì´ ìž˜ëª»ë˜ì—ˆìŠµë‹ˆë‹¤"
@@ -6490,9 +6426,6 @@ msgstr "ì´ ë°©í™”ë²½ ê·œì¹™ì´ ì†í•˜ëŠ” 방화벽 ì •ì±…ì˜ ê³ ìœ  ID입니다
msgid "Unique identifier of the firewall policy used to create the firewall."
msgstr "ë°©í™”ë²½ì„ ìž‘ì„±í•˜ëŠ” ë° ì‚¬ìš©ë˜ëŠ” 방화벽 ì •ì±…ì˜ ê³ ìœ  ID입니다."
-msgid "Unit for the metric."
-msgstr "ë©”íŠ¸ë¦­ì˜ ë‹¨ìœ„ìž…ë‹ˆë‹¤."
-
msgid "Unknown"
msgstr "ì•Œ 수 ì—†ìŒ"
@@ -6535,10 +6468,6 @@ msgid "Unknown status: %s"
msgstr "ì•Œ 수 없는 ìƒíƒœ: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "ì•Œ 수 없는 ê°ì‹œ ìƒíƒœ %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -6686,9 +6615,6 @@ msgstr "ê°’ '%s'ì´(ê°€) 정수가 아님"
msgid "Value must be a comma-delimited list string: %s"
msgstr "ê°’ì€ ì‰¼í‘œë¡œ êµ¬ë¶„ëœ ëª©ë¡ ë¬¸ìžì—´ì´ì–´ì•¼ 함: %s"
-msgid "Value must be a string"
-msgstr "ê°’ì€ ë¬¸ìžì—´ì´ì–´ì•¼ 함"
-
#, python-format
msgid "Value must be of type %s"
msgstr "ê°’ì€ %s 유형ì´ì–´ì•¼ 함"
diff --git a/heat/locale/pt_BR/LC_MESSAGES/heat.po b/heat/locale/pt_BR/LC_MESSAGES/heat.po
index 825c01819..832d60588 100644
--- a/heat/locale/pt_BR/LC_MESSAGES/heat.po
+++ b/heat/locale/pt_BR/LC_MESSAGES/heat.po
@@ -8,18 +8,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 05:31+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: pt-BR\n"
+"Language: pt_BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Portuguese (Brazil)\n"
#, python-format
@@ -392,22 +392,6 @@ msgstr ""
"Uma lista de regras de acesso que definem o acesso a partir do IP para "
"Share. "
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr ""
-"Uma lista de ações a serem executadas quando o estado faz a transição para o "
-"alarme."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"Uma lista de ações a serem executadas quando o estado faz a transição para "
-"dados insuficientes."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr ""
-"Uma lista de ações a serem executadas quando o estado faz a transição para "
-"ok."
-
msgid "A list of all rules for the QoS policy."
msgstr "Uma lista de todas as regras da política do QoS."
@@ -434,11 +418,6 @@ msgstr "Uma lista de IPs de instância de cluster."
msgid "A list of clusters to which this policy is attached."
msgstr "Uma lista de clusters à qual essa política é anexada. "
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"Uma lista de dimensões (pares nome/valor arbitrários) associada a métrica."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "Uma lista de dicionários de rota do host para a sub-rede."
@@ -574,9 +553,6 @@ msgstr ""
"URL assinada para criar execuções para fluxos de trabalho especificados no "
"recurso de Fluxo de Trabalho."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "Uma url assinada para manipular o alarme (extensão Heat)."
-
msgid "A signed url to handle the alarm."
msgstr "Uma url assinada para lidar com o alarme."
@@ -644,9 +620,6 @@ msgstr "Recurso AccessPolicy %s fora da pilha"
msgid "Action %s not allowed for user"
msgstr "Ação %s não permitida para o usuário"
-msgid "Action for the RBAC policy."
-msgstr "Ação para a política do RBAC."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "Ação a ser executada no tráfego correspondendo a regra."
@@ -927,10 +900,6 @@ msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr "Tentativa de excluir creds do usuário com o ID %(id)s que não existe"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Tentativa de excluir watch_rule: %(id)s%(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Tentativa de atualizar uma pilha com ID: %(id)s%(msg)s"
@@ -938,10 +907,6 @@ msgstr "Tentativa de atualizar uma pilha com ID: %(id)s%(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "Tentativa de atualizar uma pilha com ID: %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "Tentativa de atualizar um relógio com ID: %(id)s%(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Tentativa de usar stored_context sem user_creds"
@@ -2605,9 +2570,6 @@ msgstr ""
msgid "Instance ID to associate with EIP."
msgstr "ID da Instância para associar a EIP."
-msgid "Instance ID to be restarted."
-msgstr "ID da instância a ser reiniciada."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"Conexão da instância para API de CFN/CW validar certificados se o SSL for "
@@ -2665,14 +2627,6 @@ msgid "Invalid UUID version (%d)"
msgstr "Versão de UUID inválida (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"Ação inválida %(action)s para o tipo de objeto %(obj_type)s. Ações válidas :"
-"%(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Ação inválida %s"
@@ -2742,10 +2696,6 @@ msgstr ""
"formatos de contêiner e disco devem corresponder."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "Object_type inválido: %(obj_type)s. Object_type válido:%(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr ""
"Restrições de parâmetro inválidas para o parâmetro %s; esperada uma lista"
@@ -2771,10 +2721,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "Nome de pilha inválido %s, deve ser uma sequência"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "Estado inválido %(state)s, esperando um de %(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Status inválido %s"
@@ -2814,10 +2760,6 @@ msgstr "Fuso horário inválido: %s"
msgid "Invalid type (%s)"
msgstr "Tipo inválido (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Estado de relógio inválido %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "Conjuntos de alocação de IP e seus intervalos."
@@ -3293,9 +3235,6 @@ msgstr "Método de implementação do recurso de persistência de sessão."
msgid "Metric name watched by the alarm."
msgstr "Nome da métrica inspecionada pelo alarme."
-msgid "Metric statistic to evaluate."
-msgstr "Estatística de métrica para avaliar."
-
msgid "Min size of the cluster."
msgstr "Tamanho mín. do cluster."
@@ -3706,9 +3645,6 @@ msgstr ""
"Nomes de bancos de dados que os usuários podem acessar na criação da "
"instância."
-msgid "Namespace for the metric."
-msgstr "Namespace para métrica."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3949,10 +3885,6 @@ msgid "Operator used to compare specified statistic with threshold."
msgstr ""
"O operador utilizado para comparar a estatística especificada com o limite."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr ""
-"O operador utilizado para comparar o limite especificado com estatística."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "Arquivo de certificado de CA opcional a ser usado em conexões SSL."
@@ -5413,10 +5345,6 @@ msgstr "A porta VIP do LoadBalancer."
msgid "The VIP subnet of the LoadBalancer."
msgstr "A sub-rede VIP do LoadBalancer."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "A Regra de Observação (%(watch_name)s) não pôde ser localizada."
-
msgid "The action or operation requested is invalid"
msgstr "A ação ou operação solicitada é inválida"
@@ -6845,9 +6773,6 @@ msgstr ""
"O identificador exclusivo da política de firewall usada para criar o "
"firewall."
-msgid "Unit for the metric."
-msgstr "Unidade para a métrica."
-
msgid "Unknown"
msgstr "Desconhecido"
@@ -6892,10 +6817,6 @@ msgid "Unknown status: %s"
msgstr "Status desconhecido: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "Estado de relógio desconhecido %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -7045,9 +6966,6 @@ msgstr "Valor '%s' não é um número inteiro"
msgid "Value must be a comma-delimited list string: %s"
msgstr "Valor deve ser uma sequência de lista delimitada por vírgulas: %s"
-msgid "Value must be a string"
-msgstr "Valor deve ser uma sequência"
-
#, python-format
msgid "Value must be of type %s"
msgstr "O valor deve ser do tipo %s"
diff --git a/heat/locale/ru/LC_MESSAGES/heat.po b/heat/locale/ru/LC_MESSAGES/heat.po
index 40fb8a5b4..e8e6adef3 100644
--- a/heat/locale/ru/LC_MESSAGES/heat.po
+++ b/heat/locale/ru/LC_MESSAGES/heat.po
@@ -6,9 +6,9 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-17 05:29+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -389,18 +389,6 @@ msgstr ""
msgid "A list of access rules that define access from IP to Share."
msgstr "СпиÑок прав доÑтупа к общему реÑурÑу Ð´Ð»Ñ IP-адреÑов."
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr ""
-"СпиÑок дейÑтвий, выполнÑемых при изменении ÑоÑтоÑÐ½Ð¸Ñ Ð½Ð° предупреждение."
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr ""
-"СпиÑок дейÑтвий, выполнÑемых при изменении ÑоÑтоÑÐ½Ð¸Ñ Ð½Ð° недоÑтаточно-данных."
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "СпиÑок дейÑтвий, выполнÑемых при изменении ÑоÑтоÑÐ½Ð¸Ñ Ð½Ð° OK."
-
msgid "A list of all rules for the QoS policy."
msgstr "СпиÑок вÑех правил Ñтратегии QoS."
@@ -427,12 +415,6 @@ msgstr "СпиÑок IP-адреÑов ÑкземплÑров клаÑтера."
msgid "A list of clusters to which this policy is attached."
msgstr "СпиÑок клаÑтеров, которым назначена Ñта ÑтратегиÑ."
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr ""
-"СпиÑок размерноÑтей (произвольные пары имÑ/значение), ÑвÑзанных Ñ "
-"показателем."
-
msgid "A list of host route dictionaries for the subnet."
msgstr "СпиÑок Ñловарей Ñ Ð¼Ð°Ñ€ÑˆÑ€ÑƒÑ‚Ð°Ð¼Ð¸ хоÑтов Ð´Ð»Ñ Ð¿Ð¾Ð´Ñети."
@@ -564,9 +546,6 @@ msgid ""
msgstr ""
"ПодпиÑанный url Ð´Ð»Ñ Ð·Ð°Ð¿ÑƒÑка потоков операций, указанных в реÑурÑе Workflow."
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "ПодпиÑанный url Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ¸ Ð¿Ñ€ÐµÐ´ÑƒÐ¿Ñ€ÐµÐ¶Ð´ÐµÐ½Ð¸Ñ (раÑширение Heat)."
-
msgid "A signed url to handle the alarm."
msgstr "ПодпиÑанный url Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚ÐºÐ¸ предупреждениÑ."
@@ -630,9 +609,6 @@ msgstr "РеÑÑƒÑ€Ñ AccessPolicy %s находитÑÑ Ð½Ðµ в Ñтеке"
msgid "Action %s not allowed for user"
msgstr "ДейÑтвие %s запрещено Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ"
-msgid "Action for the RBAC policy."
-msgstr "ДейÑтвие Ð´Ð»Ñ Ñтратегии RBAC."
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "ДейÑтвие Ð´Ð»Ñ Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð½Ð°Ð´ трафиком, ÑоответÑтвующим правилу."
@@ -910,10 +886,6 @@ msgstr ""
"%(id)s"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "Попытка ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ watch_rule: %(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "Попытка Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ñтека Ñ Ð˜Ð”: %(id)s %(msg)s"
@@ -921,10 +893,6 @@ msgstr "Попытка Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ñтека Ñ Ð˜Ð”: %(id)s %(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "Попытка Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ñтека Ñ Ð˜Ð”: %(id)s %(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "Попытка Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð¾Ñ‚ÑÐ»ÐµÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ Ñ Ð˜Ð”: %(id)s %(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "Попытка иÑпользовать stored_context без user_creds"
@@ -2581,9 +2549,6 @@ msgstr "ИД ÑкземплÑра Ð´Ð»Ñ ÑвÑзи Ñ EIP, указанным Ð
msgid "Instance ID to associate with EIP."
msgstr "ИД ÑкземплÑра Ð´Ð»Ñ ÑвÑзи Ñ EIP."
-msgid "Instance ID to be restarted."
-msgstr "ИД ÑкземплÑра Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð¿ÑƒÑка."
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr ""
"Соединение ÑкземплÑра Ñ API CFN/CW проверÑет Ñертификаты при иÑпользовании "
@@ -2641,14 +2606,6 @@ msgid "Invalid UUID version (%d)"
msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ UUID (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr ""
-"ÐедопуÑтимое дейÑтвие '%(action)s' Ð´Ð»Ñ Ñ‚Ð¸Ð¿Ð° объекта '%(obj_type)s'. "
-"ДопуÑтимые дейÑтвиÑ: %(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "Ðеверное дейÑтвие %s"
@@ -2719,11 +2676,6 @@ msgstr ""
"диÑка должны Ñовпадать."
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr ""
-"ÐедопуÑтимый тип объекта: %(obj_type)s. ДопуÑтимые типы объектов: %(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr "ÐедопуÑтимые Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð½Ð° параметр %s, ожидалÑÑ ÑпиÑок"
@@ -2748,10 +2700,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ñтека %s. Оно должно быть Ñтрокой"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "ÐедопуÑтимое ÑоÑтоÑние %(state)s, ожидаетÑÑ Ð¾Ð´Ð½Ð¾ из %(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "Ðеверное ÑоÑтоÑние %s"
@@ -2791,10 +2739,6 @@ msgstr "ÐедопуÑтимый чаÑовой поÑÑ: %s"
msgid "Invalid type (%s)"
msgstr "Ðеверный тип (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "Ðеверное ÑоÑтоÑние отÑÐ»ÐµÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "Пулы Ð²Ñ‹Ð´ÐµÐ»ÐµÐ½Ð¸Ñ Ip и их диапазоны."
@@ -3263,9 +3207,6 @@ msgstr "Метод реализации функции ÑÐ¾Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ñо
msgid "Metric name watched by the alarm."
msgstr "Ð˜Ð¼Ñ Ð¿Ð¾ÐºÐ°Ð·Ð°Ñ‚ÐµÐ»Ñ, отÑлеживаемого в предупреждении."
-msgid "Metric statistic to evaluate."
-msgstr "СтатиÑтика по показателÑм Ð´Ð»Ñ Ð¾Ñ†ÐµÐ½ÐºÐ¸."
-
msgid "Min size of the cluster."
msgstr "Минимальный размер клаÑтера."
@@ -3668,9 +3609,6 @@ msgstr "Ð˜Ð¼Ñ Ð¸Ð»Ð¸ ИД типа тома (OS::Cinder::VolumeType)."
msgid "Names of databases that those users can access on instance creation."
msgstr "Имена баз данных, доÑтупных пользователÑм при Ñоздании ÑкземплÑра."
-msgid "Namespace for the metric."
-msgstr "ПроÑтранÑтво имен Ð´Ð»Ñ Ð¿Ð¾ÐºÐ°Ð·Ð°Ñ‚ÐµÐ»Ñ."
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3903,9 +3841,6 @@ msgstr "Ð”Ð»Ñ %(name)s допуÑтимо только целое чиÑло, Ð
msgid "Operator used to compare specified statistic with threshold."
msgstr "Оператор Ð´Ð»Ñ ÑÑ€Ð°Ð²Ð½ÐµÐ½Ð¸Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ð¾Ð¹ ÑтатиÑтики Ñ Ð¿Ð¾Ñ€Ð¾Ð³Ð¾Ð¼."
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr "Оператор, иÑпользуемый Ð´Ð»Ñ ÑÑ€Ð°Ð²Ð½ÐµÐ½Ð¸Ñ ÑтатиÑтики Ñ Ð¿Ð¾Ñ€Ð¾Ð³Ð¾Ð¼."
-
msgid "Optional CA cert file to use in SSL connections."
msgstr ""
"ÐеобÑзательный файл Ñертификата CA Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ Ð² ÑоединениÑÑ… SSL."
@@ -5338,10 +5273,6 @@ msgstr "Порт VIP баланÑировщика нагрузки."
msgid "The VIP subnet of the LoadBalancer."
msgstr "ПодÑеть VIP баланÑировщика нагрузки."
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "Правило отÑÐ»ÐµÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ (%(watch_name)s) не найдено."
-
msgid "The action or operation requested is invalid"
msgstr "ЗапрашиваетÑÑ Ð½ÐµÐ²ÐµÑ€Ð½Ð¾Ðµ дейÑтвие или операциÑ"
@@ -6761,9 +6692,6 @@ msgstr ""
"Уникальный идентификатор Ñтратегии брандмауÑра, иÑпользуемый Ð´Ð»Ñ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ "
"брандмауÑра."
-msgid "Unit for the metric."
-msgstr "Единица Ð¸Ð·Ð¼ÐµÑ€ÐµÐ½Ð¸Ñ Ð¿Ð¾ÐºÐ°Ð·Ð°Ñ‚ÐµÐ»Ñ."
-
msgid "Unknown"
msgstr "ÐеизвеÑтно"
@@ -6806,10 +6734,6 @@ msgid "Unknown status: %s"
msgstr "ÐеизвеÑтное ÑоÑтоÑние: %s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "ÐеизвеÑтное ÑоÑтоÑние отÑÐ»ÐµÐ¶Ð¸Ð²Ð°Ð½Ð¸Ñ %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -6958,9 +6882,6 @@ msgstr "Значение '%s' не ÑвлÑетÑÑ Ñ†ÐµÐ»Ñ‹Ð¼"
msgid "Value must be a comma-delimited list string: %s"
msgstr "Значение должно быть Ñтрокой ÑпиÑка, разделенного запÑтыми: %s"
-msgid "Value must be a string"
-msgstr "Значение должно быть Ñтрокой"
-
#, python-format
msgid "Value must be of type %s"
msgstr "Значение должно иметь тип %s"
diff --git a/heat/locale/zh_CN/LC_MESSAGES/heat.po b/heat/locale/zh_CN/LC_MESSAGES/heat.po
index 25f03a6b7..f3107a45f 100644
--- a/heat/locale/zh_CN/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_CN/LC_MESSAGES/heat.po
@@ -7,18 +7,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 05:32+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: zh-CN\n"
+"Language: zh_CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Chinese (China)\n"
#, python-format
@@ -358,16 +358,6 @@ msgstr "当状态过渡至正常时要调用的 URL (webhook) 的列表。"
msgid "A list of access rules that define access from IP to Share."
msgstr "用于定义通过 IP 访问共享的访问规则的列表。"
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "当状态过渡至警报时要执行的操作的列表。"
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr "当状态过渡至数据不足时要执行的操作的列表。"
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "当状态过渡至正常时要执行的操作的列表。"
-
msgid "A list of all rules for the QoS policy."
msgstr "QoS 策略的所有规则列表。"
@@ -390,10 +380,6 @@ msgstr "集群实例 IP 的列表。"
msgid "A list of clusters to which this policy is attached."
msgstr "此策略附加至的集群列表。"
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr "与度量值关联的维度(任意名称/值对)的列表。"
-
msgid "A list of host route dictionaries for the subnet."
msgstr "子网的主机路由字典列表。"
@@ -506,9 +492,6 @@ msgid ""
"resource."
msgstr "已签署 URL,用于为工作流程资源中指定的工作流程创建执行。"
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "用于处理警报的带签名的 URL(Heat 扩展)。"
-
msgid "A signed url to handle the alarm."
msgstr "用于处理警报的带签名的 URL。"
@@ -569,9 +552,6 @@ msgstr "AccessPolicy 资源 %s 未在堆栈中"
msgid "Action %s not allowed for user"
msgstr "用户不允许执行动作%s"
-msgid "Action for the RBAC policy."
-msgstr "RBAC 策略的操作。"
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "要对与规则匹配的流量执行的操作。"
@@ -819,10 +799,6 @@ msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr "尝试删除的用户凭证(标识为 %(id)s)不存在"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "请尝试删除 watch_rule:%(id)s %(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "请尝试更新具有标识 %(id)s 的堆栈:%(msg)s"
@@ -830,10 +806,6 @@ msgstr "请尝试更新具有标识 %(id)s 的堆栈:%(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "尝试更新带有以下标识的堆栈:%(id)s %(traversal)s %(msg)s "
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "请尝试更新具有标识 %(id)s 的监视:%(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "尝试使用不带 user_creds 的 stored_context"
@@ -2366,9 +2338,6 @@ msgstr "要与由 EIP 属性指定的 EIP 关联的实例标识。"
msgid "Instance ID to associate with EIP."
msgstr "要与 EIP 关联的实例标识。"
-msgid "Instance ID to be restarted."
-msgstr "要重新启动的实例标识。"
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr "实例至 CFN/CW API 验证证书的连接(如果使用了 SSL)。"
@@ -2422,12 +2391,6 @@ msgid "Invalid UUID version (%d)"
msgstr "UUID 版本 (%d) 无效"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr "操作 %(action)s 对于对象类型 %(obj_type)s 无效。有效操作:%(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "操作 %s 无效"
@@ -2494,10 +2457,6 @@ msgstr ""
"为“aki”、“ari”或“ami”的其中之一时,容器格式与磁盘格式必须匹配。"
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "无效 object_type:%(obj_type)s。有效 object_type:%(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr "对参数 %s 无效的参数约束,需要列表"
@@ -2522,10 +2481,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "堆栈名称 %s 无效,必须为字符串"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "状态 %(state)s 无效,期望下列其中一项:%(expect)s"
-
-#, python-format
msgid "Invalid status %s"
msgstr "状态 %s 无效"
@@ -2565,10 +2520,6 @@ msgstr "无效时区:%s"
msgid "Invalid type (%s)"
msgstr "类型 (%s) 无效"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "监视状态 %s 无效"
-
msgid "Ip allocation pools and their ranges."
msgstr "IP 分配池及其范围。"
@@ -2985,9 +2936,6 @@ msgstr "会话持久性功能的实现方法。"
msgid "Metric name watched by the alarm."
msgstr "警报监视的度量值名称。"
-msgid "Metric statistic to evaluate."
-msgstr "要评估的度量值统计信息。"
-
msgid "Min size of the cluster."
msgstr "集群的最小大小。"
@@ -3369,9 +3317,6 @@ msgstr "卷类型 (OS::Cinder::VolumeType) 的名称或标识。"
msgid "Names of databases that those users can access on instance creation."
msgstr "在进行实例创建时,那些用户可访问的数据库的名称。"
-msgid "Namespace for the metric."
-msgstr "用于度量值的名称空间。"
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3590,9 +3535,6 @@ msgstr "“%(name)s”仅接受非零整数。"
msgid "Operator used to compare specified statistic with threshold."
msgstr "用于将指定统计信息与阈值进行比较的运算符。"
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr "用于将指定统计信息与阈值进行比较的运算符。"
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "SSL 连接中要使用的可选 CA 证书文件。"
@@ -4928,10 +4870,6 @@ msgstr "负载均衡器的 VIP 端口。"
msgid "The VIP subnet of the LoadBalancer."
msgstr "负载均衡器的 VIP 子网。"
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "找不到监视规则 (%(watch_name)s)。"
-
msgid "The action or operation requested is invalid"
msgstr "请求的动作或操作无效"
@@ -6217,9 +6155,6 @@ msgstr "此防火墙规则所属的防火墙策略的唯一标识。"
msgid "Unique identifier of the firewall policy used to create the firewall."
msgstr "用于创建防火墙的防火墙策略的唯一标识。"
-msgid "Unit for the metric."
-msgstr "用于度量值的单位。"
-
msgid "Unknown"
msgstr "未知"
@@ -6262,10 +6197,6 @@ msgid "Unknown status: %s"
msgstr "未知状态:%s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "监视状态 %s 未知"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -6402,9 +6333,6 @@ msgstr "值“%s”不是整数"
msgid "Value must be a comma-delimited list string: %s"
msgstr "值必须是以逗号定界的列表字符串:%s"
-msgid "Value must be a string"
-msgstr "值必须为字符串"
-
#, python-format
msgid "Value must be of type %s"
msgstr "值的类型必须为 %s"
diff --git a/heat/locale/zh_TW/LC_MESSAGES/heat.po b/heat/locale/zh_TW/LC_MESSAGES/heat.po
index f2501102a..b40110dfe 100644
--- a/heat/locale/zh_TW/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_TW/LC_MESSAGES/heat.po
@@ -6,18 +6,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-04-12 05:32+0000\n"
"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: zh-TW\n"
+"Language: zh_TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.9.6\n"
+"X-Generator: Zanata 4.3.3\n"
"Language-Team: Chinese (Taiwan)\n"
#, python-format
@@ -357,16 +357,6 @@ msgstr "狀態轉移至正常時要呼叫的 URL(Web 連結鉤)清單。"
msgid "A list of access rules that define access from IP to Share."
msgstr "存取規則的清單,這些規則用來定義從 IP 到共用項目的存取。"
-msgid "A list of actions to execute when state transitions to alarm."
-msgstr "狀態轉移至警示時要執行的動作清單。"
-
-msgid ""
-"A list of actions to execute when state transitions to insufficient-data."
-msgstr "狀態轉移至資料不足時要執行的動作清單。"
-
-msgid "A list of actions to execute when state transitions to ok."
-msgstr "狀態轉移至正常時要執行的動作清單。"
-
msgid "A list of all rules for the QoS policy."
msgstr "服務品質原則的所有規則清單。"
@@ -389,10 +379,6 @@ msgstr "叢集實例 IP 清單。"
msgid "A list of clusters to which this policy is attached."
msgstr "要將此原則連接至的叢集清單。"
-msgid ""
-"A list of dimensions (arbitrary name/value pairs) associated with the metric."
-msgstr "與度量相關聯的維度(任意名稱/值配對)清單。"
-
msgid "A list of host route dictionaries for the subnet."
msgstr "子網路的主機路由字典清單。"
@@ -505,9 +491,6 @@ msgid ""
"resource."
msgstr "已簽署的 URL,用來為工作流程資源中指定的工作流程建立執行。"
-msgid "A signed url to handle the alarm (Heat extension)."
-msgstr "要用來處理警示的已簽署 URL。(Heat 延伸)。"
-
msgid "A signed url to handle the alarm."
msgstr "要用來處理警示的已簽署 URL。"
@@ -568,9 +551,6 @@ msgstr "AccessPolicy 資源 %s 不在堆疊中"
msgid "Action %s not allowed for user"
msgstr "不容許使用者執行動作 %s"
-msgid "Action for the RBAC policy."
-msgstr "RBAC 原則的動作。"
-
msgid "Action to be performed on the traffic matching the rule."
msgstr "要對與規則相符之資料流量執行的動作。"
@@ -819,10 +799,6 @@ msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr "嘗試刪除 ID 為 %(id)s 的使用者認證,但它不存在"
#, python-format
-msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
-msgstr "嘗試刪除 ID 為 %(id)s 的 watch_rule:%(msg)s"
-
-#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr "嘗試更新 ID 為 %(id)s 的堆疊:%(msg)s"
@@ -830,10 +806,6 @@ msgstr "嘗試更新 ID 為 %(id)s 的堆疊:%(msg)s"
msgid "Attempt to update a stack with id: %(id)s %(traversal)s %(msg)s"
msgstr "嘗試更新 ID 為 %(id)s 的堆疊:%(traversal)s %(msg)s"
-#, python-format
-msgid "Attempt to update a watch with id: %(id)s %(msg)s"
-msgstr "嘗試更新 ID 為 %(id)s 的監看:%(msg)s"
-
msgid "Attempt to use stored_context with no user_creds"
msgstr "嘗試在不使用 user_creds 的情況下使用 stored_context"
@@ -2370,9 +2342,6 @@ msgstr "要與 EIP 內容所指定的 EIP 產生關聯的實例 ID。"
msgid "Instance ID to associate with EIP."
msgstr "要與 EIP 產生關聯的實例 ID。"
-msgid "Instance ID to be restarted."
-msgstr "要重新啟動的實例 ID。"
-
msgid "Instance connection to CFN/CW API validate certs if SSL is used."
msgstr "與 CFN/CW API 驗證憑證的實例連線(如果使用 SSL)。"
@@ -2426,12 +2395,6 @@ msgid "Invalid UUID version (%d)"
msgstr "無效的 UUID 版本 (%d)"
#, python-format
-msgid ""
-"Invalid action %(action)s for object type %(obj_type)s. Valid actions :"
-"%(value)s"
-msgstr "物件類型 %(obj_type)s 的動作 %(action)s 無效。有效動作:%(value)s"
-
-#, python-format
msgid "Invalid action %s"
msgstr "無效的動作 %s"
@@ -2499,10 +2462,6 @@ msgstr ""
"中的一個時,儲存器與磁碟格式必須相符。"
#, python-format
-msgid "Invalid object_type: %(obj_type)s. Valid object_type :%(value)s"
-msgstr "無效的 object_type:%(obj_type)s。有效的 object_type:%(value)s"
-
-#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
msgstr "參數 %s 的參數限制無效,預期為清單"
@@ -2527,10 +2486,6 @@ msgid "Invalid stack name %s, must be a string"
msgstr "堆疊名稱 %s 無效,必須是字串"
#, python-format
-msgid "Invalid state %(state)s, expecting one of %(expect)s"
-msgstr "無效的狀態 %(state)s,預期 %(expect)s 的其中之一"
-
-#, python-format
msgid "Invalid status %s"
msgstr "無效的狀態 %s"
@@ -2570,10 +2525,6 @@ msgstr "無效的時區:%s"
msgid "Invalid type (%s)"
msgstr "無效的類型 (%s)"
-#, python-format
-msgid "Invalid watch state %s"
-msgstr "無效的監看狀態 %s"
-
msgid "Ip allocation pools and their ranges."
msgstr "IP 配置儲存區及其範圍。"
@@ -2993,9 +2944,6 @@ msgstr "階段作業持續性功能的實作方法。"
msgid "Metric name watched by the alarm."
msgstr "警示所監看的度量名稱。"
-msgid "Metric statistic to evaluate."
-msgstr "要評估的度量統計資料。"
-
msgid "Min size of the cluster."
msgstr "叢集大小下限。"
@@ -3379,9 +3327,6 @@ msgstr "磁區類型的名稱或 ID (OS::Cinder::VolumeType)。"
msgid "Names of databases that those users can access on instance creation."
msgstr "建立實例時那些使用者可以存取的資料庫名稱。"
-msgid "Namespace for the metric."
-msgstr "度量的名稱空間。"
-
msgid ""
"Namespace to group this software config by when delivered to a server. This "
"may imply what configuration tool is going to perform the configuration."
@@ -3599,9 +3544,6 @@ msgstr "'%(name)s' 僅接受非零整數。"
msgid "Operator used to compare specified statistic with threshold."
msgstr "用來將所指定統計資料與臨界值相比較的運算子。"
-msgid "Operator used to compare the specified Statistic with Threshold."
-msgstr "用來將所指定統計資料與臨界值相比較的運算子。"
-
msgid "Optional CA cert file to use in SSL connections."
msgstr "要用在 SSL 連線中的選用 CA 憑證檔。"
@@ -4937,10 +4879,6 @@ msgstr "負載平衡器的 VIP 埠。"
msgid "The VIP subnet of the LoadBalancer."
msgstr "負載平衡器的 VIP 子網路。"
-#, python-format
-msgid "The Watch Rule (%(watch_name)s) could not be found."
-msgstr "找不到監看規則 (%(watch_name)s)。"
-
msgid "The action or operation requested is invalid"
msgstr "所要求的動作或作業無效"
@@ -6227,9 +6165,6 @@ msgstr "此防火牆規則所屬之防火牆原則的唯一 ID。"
msgid "Unique identifier of the firewall policy used to create the firewall."
msgstr "建立防火牆時所使用之防火牆原則的唯一 ID。"
-msgid "Unit for the metric."
-msgstr "度量的單位。"
-
msgid "Unknown"
msgstr "未知"
@@ -6272,10 +6207,6 @@ msgid "Unknown status: %s"
msgstr "不明狀態:%s"
#, python-format
-msgid "Unknown watch state %s"
-msgstr "不明的監看狀態 %s"
-
-#, python-format
msgid ""
"Unrecognized value \"%(value)s\" for \"%(name)s\", acceptable values are: "
"true, false."
@@ -6412,9 +6343,6 @@ msgstr "值 '%s' 不是整數"
msgid "Value must be a comma-delimited list string: %s"
msgstr "值必須是以逗點定界的清單字串:%s"
-msgid "Value must be a string"
-msgstr "值必須是字串"
-
#, python-format
msgid "Value must be of type %s"
msgstr "值必須是 %s 類型"
diff --git a/heat/objects/event.py b/heat/objects/event.py
index 6c53c68d2..eb4f7587c 100644
--- a/heat/objects/event.py
+++ b/heat/objects/event.py
@@ -79,7 +79,7 @@ class Event(
@property
def resource_properties(self):
if self._resource_properties is None:
- LOG.info('rsrp_prop_data lazy load')
+ LOG.info('rsrc_prop_data lazy load')
rpd_obj = rpd.ResourcePropertiesData.get_by_id(
self._context, self.rsrc_prop_data_id)
self._resource_properties = rpd_obj.data or {}
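The hunk above only fixes a typo in the log message; the surrounding property is the usual cache-on-first-access pattern, where the first read pays for the DB fetch and the result is cached on the object. A minimal standalone sketch of that pattern follows; the Store class and its _fetch_from_db method are hypothetical stand-ins for the Event object and rpd.ResourcePropertiesData.get_by_id, not Heat's real API.

class Store(object):
    """Toy object demonstrating the cache-on-first-access property."""

    def __init__(self, rsrc_prop_data_id):
        self.rsrc_prop_data_id = rsrc_prop_data_id
        self._resource_properties = None  # nothing loaded yet

    def _fetch_from_db(self):
        # hypothetical stand-in for the real DB round trip
        return {'prop': 'value for %s' % self.rsrc_prop_data_id}

    @property
    def resource_properties(self):
        if self._resource_properties is None:
            # first access: perform the expensive load and cache it
            self._resource_properties = self._fetch_from_db() or {}
        return self._resource_properties

s = Store(42)
assert s.resource_properties is s.resource_properties  # fetched only once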
diff --git a/heat/objects/raw_template.py b/heat/objects/raw_template.py
index f864c56d2..3a1ba35db 100644
--- a/heat/objects/raw_template.py
+++ b/heat/objects/raw_template.py
@@ -89,7 +89,7 @@ class RawTemplate(
@classmethod
def encrypt_hidden_parameters(cls, tmpl):
if cfg.CONF.encrypt_parameters_and_properties:
- for param_name, param in tmpl.env.params.items():
+ for param_name in tmpl.env.params.keys():
if not tmpl.param_schemata()[param_name].hidden:
continue
clear_text_val = tmpl.env.params.get(param_name)
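The change above drops the unused value from the loop: the body only needs each parameter name, because it re-reads the value via tmpl.env.params.get(param_name) before encrypting it, so iterating keys() is sufficient. A hedged standalone sketch of the same shape; the hidden-parameter schema dict and encrypt() helper below are toy stand-ins for tmpl.param_schemata() and Heat's real parameter encryption.

def encrypt(value):
    # toy stand-in for the real encryption helper
    return u'encrypted:' + value[::-1]

params = {'db_password': 's3cret', 'flavor': 'm1.small'}
hidden = {'db_password': True, 'flavor': False}  # stand-in schema

for param_name in params.keys():
    if not hidden[param_name]:
        continue
    clear_text_val = params.get(param_name)  # value re-read by name here
    params[param_name] = encrypt(clear_text_val)

assert params['flavor'] == 'm1.small'
assert params['db_password'].startswith('encrypted:')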
diff --git a/heat/objects/resource.py b/heat/objects/resource.py
index 67cc24a0c..ef4992ac5 100644
--- a/heat/objects/resource.py
+++ b/heat/objects/resource.py
@@ -164,7 +164,7 @@ class Resource(
def properties_data(self):
if (not self._properties_data and
self.rsrc_prop_data_id is not None):
- LOG.info('rsrp_prop_data lazy load')
+ LOG.info('rsrc_prop_data lazy load')
rpd_obj = rpd.ResourcePropertiesData.get_by_id(
self._context, self.rsrc_prop_data_id)
self._properties_data = rpd_obj.data or {}
@@ -275,6 +275,14 @@ class Resource(
return all
@classmethod
+ def get_all_stack_ids_by_root_stack(cls, context, stack_id):
+ resources_db = db_api.resource_get_all_by_root_stack(
+ context,
+ stack_id,
+ stack_id_only=True)
+ return {db_res.stack_id for db_res in six.itervalues(resources_db)}
+
+ @classmethod
def purge_deleted(cls, context, stack_id):
return db_api.resource_purge_deleted(context, stack_id)
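The new get_all_stack_ids_by_root_stack classmethod above collapses all resource rows under a root stack into the distinct set of stack ids. A toy illustration of that set-comprehension de-duplication; Row and resources_db are hypothetical stand-ins for the rows returned by resource_get_all_by_root_stack(..., stack_id_only=True).

import collections

import six  # the code above targets py2/py3, hence six.itervalues

Row = collections.namedtuple('Row', 'stack_id')
resources_db = {1: Row('stack-a'), 2: Row('stack-a'), 3: Row('stack-b')}

stack_ids = {db_res.stack_id for db_res in six.itervalues(resources_db)}
assert stack_ids == {'stack-a', 'stack-b'}  # duplicates collapse in the set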
diff --git a/heat/objects/watch_data.py b/heat/objects/watch_data.py
deleted file mode 100644
index e972f337f..000000000
--- a/heat/objects/watch_data.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2014 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WatchData object."""
-
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-
-from heat.db.sqlalchemy import api as db_api
-from heat.objects import base as heat_base
-from heat.objects import fields as heat_fields
-
-
-class WatchData(
- heat_base.HeatObject,
- base.VersionedObjectDictCompat,
-):
-
- fields = {
- 'id': fields.IntegerField(),
- 'data': heat_fields.JsonField(nullable=True),
- 'watch_rule_id': fields.StringField(),
- 'created_at': fields.DateTimeField(read_only=True),
- 'updated_at': fields.DateTimeField(nullable=True),
- }
-
- @staticmethod
- def _from_db_object(context, rule, db_data):
- for field in rule.fields:
- rule[field] = db_data[field]
- rule._context = context
- rule.obj_reset_changes()
- return rule
-
- @classmethod
- def create(cls, context, values):
- db_data = db_api.watch_data_create(context, values)
- return cls._from_db_object(context, cls(), db_data)
-
- @classmethod
- def get_all(cls, context):
- return [cls._from_db_object(context, cls(), db_data)
- for db_data in db_api.watch_data_get_all(context)]
-
- @classmethod
- def get_all_by_watch_rule_id(cls, context, watch_rule_id):
- return (cls._from_db_object(context, cls(), db_data)
- for db_data in db_api.watch_data_get_all_by_watch_rule_id(
- context, watch_rule_id))
diff --git a/heat/objects/watch_rule.py b/heat/objects/watch_rule.py
deleted file mode 100644
index 36ff0c27d..000000000
--- a/heat/objects/watch_rule.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2014 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WatchRule object."""
-
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-
-from heat.db.sqlalchemy import api as db_api
-from heat.objects import base as heat_base
-from heat.objects import fields as heat_fields
-from heat.objects import watch_data
-
-
-class WatchRule(
- heat_base.HeatObject,
- base.VersionedObjectDictCompat,
-):
-
- fields = {
- 'id': fields.IntegerField(),
- 'name': fields.StringField(nullable=True),
- 'rule': heat_fields.JsonField(nullable=True),
- 'state': fields.StringField(nullable=True),
- 'last_evaluated': fields.DateTimeField(nullable=True),
- 'stack_id': fields.StringField(),
- 'watch_data': fields.ListOfObjectsField(watch_data.WatchData),
- 'created_at': fields.DateTimeField(read_only=True),
- 'updated_at': fields.DateTimeField(nullable=True),
- }
-
- @staticmethod
- def _from_db_object(context, rule, db_rule):
- for field in rule.fields:
- if field == 'watch_data':
- rule[field] = watch_data.WatchData.get_all_by_watch_rule_id(
- context, db_rule['id'])
- else:
- rule[field] = db_rule[field]
- rule._context = context
- rule.obj_reset_changes()
- return rule
-
- @classmethod
- def get_by_id(cls, context, rule_id):
- db_rule = db_api.watch_rule_get(context, rule_id)
- return cls._from_db_object(context, cls(), db_rule)
-
- @classmethod
- def get_by_name(cls, context, watch_rule_name):
- db_rule = db_api.watch_rule_get_by_name(context, watch_rule_name)
- return cls._from_db_object(context, cls(), db_rule)
-
- @classmethod
- def get_all(cls, context):
- return [cls._from_db_object(context, cls(), db_rule)
- for db_rule in db_api.watch_rule_get_all(context)]
-
- @classmethod
- def get_all_by_stack(cls, context, stack_id):
- return [cls._from_db_object(context, cls(), db_rule)
- for db_rule in db_api.watch_rule_get_all_by_stack(context,
- stack_id)]
-
- @classmethod
- def update_by_id(cls, context, watch_id, values):
- db_api.watch_rule_update(context, watch_id, values)
-
- @classmethod
- def create(cls, context, values):
- return cls._from_db_object(context, cls(),
- db_api.watch_rule_create(context, values))
-
- @classmethod
- def delete(cls, context, watch_id):
- db_api.watch_rule_delete(context, watch_id)
diff --git a/heat/policies/__init__.py b/heat/policies/__init__.py
index b35b935a3..1afb736e1 100644
--- a/heat/policies/__init__.py
+++ b/heat/policies/__init__.py
@@ -13,10 +13,30 @@
import itertools
+from heat.policies import actions
from heat.policies import base
+from heat.policies import build_info
+from heat.policies import cloudformation
+from heat.policies import events
+from heat.policies import resource
+from heat.policies import resource_types
+from heat.policies import service
+from heat.policies import software_configs
+from heat.policies import software_deployments
+from heat.policies import stacks
def list_rules():
return itertools.chain(
base.list_rules(),
+ actions.list_rules(),
+ build_info.list_rules(),
+ cloudformation.list_rules(),
+ events.list_rules(),
+ resource.list_rules(),
+ resource_types.list_rules(),
+ service.list_rules(),
+ software_configs.list_rules(),
+ software_deployments.list_rules(),
+ stacks.list_rules(),
)
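list_rules() simply chains every policy module's defaults into one iterable, which oslo.policy consumes both for sample-file generation and for runtime enforcement. A minimal sketch of the runtime side, assuming heat and oslo.policy are importable; Enforcer, register_defaults and authorize are standard oslo.policy API, while the creds dict below is illustrative only.

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from heat import policies

# Register the in-code defaults; a policy file is then only needed for
# operator overrides.
enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults(policies.list_rules())

creds = {'roles': ['member'], 'project_id': 'demo'}  # illustrative context
print(enforcer.authorize('build_info:build_info', {}, creds, do_raise=False))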
diff --git a/heat/policies/actions.py b/heat/policies/actions.py
new file mode 100644
index 000000000..4dd45fcb0
--- /dev/null
+++ b/heat/policies/actions.py
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'actions:%s'
+
+actions_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'action',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Performs non-lifecycle operations on the stack '
+ '(Snapshot, Resume, Cancel update, or check stack resources).',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'actions',
+ 'method': 'POST'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return actions_policies
diff --git a/heat/policies/build_info.py b/heat/policies/build_info.py
new file mode 100644
index 000000000..066bf7bdb
--- /dev/null
+++ b/heat/policies/build_info.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'build_info:%s'
+
+build_info_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'build_info',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show build information.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/build_info',
+ 'method': 'GET'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return build_info_policies
diff --git a/heat/policies/cloudformation.py b/heat/policies/cloudformation.py
new file mode 100644
index 000000000..aa61fa9a0
--- /dev/null
+++ b/heat/policies/cloudformation.py
@@ -0,0 +1,66 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+# These policies are for AWS CloudFormation-like APIs, so we won't list out
+# the URI paths in rules.
+
+POLICY_ROOT = 'cloudformation:%s'
+
+cloudformation_policies = [
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'ListStacks',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'CreateStack',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'DescribeStacks',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'DeleteStack',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'UpdateStack',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'CancelUpdateStack',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'DescribeStackEvents',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'ValidateTemplate',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'GetTemplate',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'EstimateTemplateCost',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'DescribeStackResource',
+ check_str=base.RULE_ALLOW_EVERYBODY),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'DescribeStackResources',
+ check_str=base.RULE_DENY_STACK_USER),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'ListStackResources',
+ check_str=base.RULE_DENY_STACK_USER)
+]
+
+
+def list_rules():
+ return cloudformation_policies
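Note the deliberate split in rule classes: the CloudFormation-compatible rules above are plain RuleDefault entries (no REST paths to document, per the module comment), while the native-API modules use DocumentedRuleDefault, which additionally requires a description and an operations list so a policy reference can be rendered. A small side-by-side sketch; both classes are real oslo.policy API, but the names below are illustrative only.

from oslo_policy import policy

plain = policy.RuleDefault(
    name='example:PlainRule', check_str='rule:deny_stack_user')
documented = policy.DocumentedRuleDefault(
    name='example:documented',
    check_str='rule:deny_stack_user',
    description='Example documented operation.',
    operations=[{'path': '/v1/{tenant_id}/example', 'method': 'GET'}])

print(plain.name, documented.description)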
diff --git a/heat/policies/events.py b/heat/policies/events.py
new file mode 100644
index 000000000..b6c1f21fa
--- /dev/null
+++ b/heat/policies/events.py
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'events:%s'
+
+events_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List events.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'events',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show event.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources/{resource_name}/events/{event_id}',
+ 'method': 'GET'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return events_policies
diff --git a/heat/policies/resource.py b/heat/policies/resource.py
new file mode 100644
index 000000000..8be1c2a40
--- /dev/null
+++ b/heat/policies/resource.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'resource:%s'
+
+resource_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List resources.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'metadata',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ description='Show resource metadata.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources/{resource_name}/metadata',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'signal',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ description='Signal resource.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources/{resource_name}/signal',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'mark_unhealthy',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Mark resource as unhealthy.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources/{resource_name_or_physical_id}',
+ 'method': 'PATCH'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show resource.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'resources/{resource_name}',
+ 'method': 'GET'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return resource_policies
diff --git a/heat/policies/resource_types.py b/heat/policies/resource_types.py
new file mode 100644
index 000000000..a706aea0f
--- /dev/null
+++ b/heat/policies/resource_types.py
@@ -0,0 +1,69 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'resource_types:%s'
+
+resource_types_policies = [
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Nova::Flavor',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Cinder::EncryptedVolumeType',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Cinder::VolumeType',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Cinder::Quota',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::Quota',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Nova::Quota',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Manila::ShareType',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::ProviderNet',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::QoSPolicy',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::QoSBandwidthLimitRule',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::Segment',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Nova::HostAggregate',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Cinder::QoSSpecs',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Cinder::QoSAssociation',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Keystone::*',
+ check_str=base.RULE_PROJECT_ADMIN)
+]
+
+
+def list_rules():
+ return resource_types_policies
diff --git a/heat/policies/service.py b/heat/policies/service.py
new file mode 100644
index 000000000..9bf86a696
--- /dev/null
+++ b/heat/policies/service.py
@@ -0,0 +1,27 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'service:%s'
+
+service_policies = [
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_CONTEXT_IS_ADMIN)
+]
+
+
+def list_rules():
+ return service_policies
diff --git a/heat/policies/software_configs.py b/heat/policies/software_configs.py
new file mode 100644
index 000000000..72f6f2c99
--- /dev/null
+++ b/heat/policies/software_configs.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'software_configs:%s'
+
+software_configs_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'global_index',
+ check_str=base.RULE_DENY_EVERYBODY,
+ description='List configs globally.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_configs',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List configs.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_configs',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Create config.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_configs',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show config details.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_configs/{config_id}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Delete config.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_configs/{config_id}',
+ 'method': 'DELETE'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return software_configs_policies
diff --git a/heat/policies/software_deployments.py b/heat/policies/software_deployments.py
new file mode 100644
index 000000000..05f73d586
--- /dev/null
+++ b/heat/policies/software_deployments.py
@@ -0,0 +1,91 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'software_deployments:%s'
+
+software_deployments_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List deployments.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Create deployment.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show deployment details.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'update',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Update deployment.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
+ 'method': 'PUT'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Delete deployment.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
+ 'method': 'DELETE'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'metadata',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ description='Show server configuration metadata.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/software_deployments/metadata/'
+ '{server_id}',
+ 'method': 'GET'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return software_deployments_policies
diff --git a/heat/policies/stacks.py b/heat/policies/stacks.py
new file mode 100644
index 000000000..7332a69a3
--- /dev/null
+++ b/heat/policies/stacks.py
@@ -0,0 +1,370 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_policy import policy
+
+from heat.policies import base
+
+POLICY_ROOT = 'stacks:%s'
+
+stacks_policies = [
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'abandon',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Abandon stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'abandon',
+ 'method': 'DELETE'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Create stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Delete stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
+ 'method': 'DELETE'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'detail',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List stacks in detail.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'export',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Export stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'export',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'generate_template',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Generate stack template.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'template',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'global_index',
+ check_str=base.RULE_DENY_EVERYBODY,
+ description='List stacks globally.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List stacks.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'list_resource_types',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List resource types.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/resource_types',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'list_template_versions',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List template versions.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/template_versions',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'list_template_functions',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List template functions.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/template_versions/'
+ '{template_version}/functions',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'lookup',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ description='Find stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_identity}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'preview',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Preview stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/preview',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'resource_schema',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show resource type schema.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/resource_types/{type_name}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_identity}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'template',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Get stack template.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'template',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'environment',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Get stack environment.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'environment',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'files',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Get stack files.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'files',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'update',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Update stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
+ 'method': 'PUT'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'update_patch',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Update stack (PATCH).',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
+ 'method': 'PATCH'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'preview_update',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Preview update stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'preview',
+ 'method': 'PUT'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'preview_update_patch',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Preview update stack (PATCH).',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'preview',
+ 'method': 'PATCH'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'validate_template',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Validate template.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/validate',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+        description='Snapshot stack.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'snapshots',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Show snapshot.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'snapshots/{snapshot_id}',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'delete_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Delete snapshot.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'snapshots/{snapshot_id}',
+ 'method': 'DELETE'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'list_snapshots',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List snapshots.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'snapshots',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'restore_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='Restore snapshot.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'snapshots/{snapshot_id}/restore',
+ 'method': 'POST'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'list_outputs',
+ check_str=base.RULE_DENY_STACK_USER,
+ description='List outputs.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'outputs',
+ 'method': 'GET'
+ }
+ ]
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'show_output',
+ check_str=base.RULE_DENY_STACK_USER,
+        description='Show output.',
+ operations=[
+ {
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
+ 'outputs/{output_key}',
+ 'method': 'GET'
+ }
+ ]
+ )
+]
+
+
+def list_rules():
+ return stacks_policies
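
As with the other new policy modules, stacks.py exposes list_rules() so its defaults can be aggregated for the 'oslo.policy.policies' entry point used by the sample-policy generator. A hedged sketch of such an aggregator, in the style of a package-level heat/policies/__init__.py (the exact module list here is assumed for illustration):

    import itertools

    from heat.policies import base
    from heat.policies import software_deployments
    from heat.policies import stacks


    def list_rules():
        # Chain the per-module defaults into one iterable of rules.
        return itertools.chain(
            base.list_rules(),
            software_deployments.list_rules(),
            stacks.list_rules(),
        )
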
diff --git a/heat/rpc/api.py b/heat/rpc/api.py
index 90cdc8177..1763718eb 100644
--- a/heat/rpc/api.py
+++ b/heat/rpc/api.py
@@ -127,66 +127,6 @@ NOTIFY_KEYS = (
STACK_TAGS,
)
-# This is the representation of a watch we expose to the API via RPC
-WATCH_KEYS = (
- WATCH_ACTIONS_ENABLED, WATCH_ALARM_ACTIONS, WATCH_TOPIC,
- WATCH_UPDATED_TIME, WATCH_DESCRIPTION, WATCH_NAME,
- WATCH_COMPARISON, WATCH_DIMENSIONS, WATCH_PERIODS,
- WATCH_INSUFFICIENT_ACTIONS, WATCH_METRIC_NAME, WATCH_NAMESPACE,
- WATCH_OK_ACTIONS, WATCH_PERIOD, WATCH_STATE_REASON,
- WATCH_STATE_REASON_DATA, WATCH_STATE_UPDATED_TIME, WATCH_STATE_VALUE,
- WATCH_STATISTIC, WATCH_THRESHOLD, WATCH_UNIT, WATCH_STACK_ID,
-) = (
- 'actions_enabled', 'actions', 'topic',
- 'updated_time', 'description', 'name',
- 'comparison', 'dimensions', 'periods',
- 'insufficient_actions', 'metric_name', 'namespace',
- 'ok_actions', 'period', 'state_reason',
- 'state_reason_data', 'state_updated_time', 'state_value',
- 'statistic', 'threshold', 'unit', 'stack_id',
-)
-
-# Alternate representation of a watch rule to align with DB format
-# FIXME : These align with AWS naming for compatibility with the
-# current cfn-push-stats & metadata server, fix when we've ported
-# cfn-push-stats to use the Cloudwatch server and/or moved metric
-# collection into ceilometer, these should just be WATCH_KEYS
-# or each field should be stored separately in the DB watch_data
-# table if we stick to storing watch data in the heat DB
-WATCH_RULE_KEYS = (
- RULE_ACTIONS_ENABLED, RULE_ALARM_ACTIONS, RULE_TOPIC,
- RULE_UPDATED_TIME, RULE_DESCRIPTION, RULE_NAME,
- RULE_COMPARISON, RULE_DIMENSIONS, RULE_PERIODS,
- RULE_INSUFFICIENT_ACTIONS, RULE_METRIC_NAME, RULE_NAMESPACE,
- RULE_OK_ACTIONS, RULE_PERIOD, RULE_STATE_REASON,
- RULE_STATE_REASON_DATA, RULE_STATE_UPDATED_TIME, RULE_STATE_VALUE,
- RULE_STATISTIC, RULE_THRESHOLD, RULE_UNIT, RULE_STACK_NAME,
-) = (
- 'ActionsEnabled', 'AlarmActions', 'AlarmArn',
- 'AlarmConfigurationUpdatedTimestamp', 'AlarmDescription', 'AlarmName',
- 'ComparisonOperator', 'Dimensions', 'EvaluationPeriods',
- 'InsufficientDataActions', 'MetricName', 'Namespace',
- 'OKActions', 'Period', 'StateReason',
- 'StateReasonData', 'StateUpdatedTimestamp', 'StateValue',
- 'Statistic', 'Threshold', 'Unit', 'StackName',
-)
-
-WATCH_STATES = (
- WATCH_STATE_OK, WATCH_STATE_ALARM, WATCH_STATE_NODATA,
- WATCH_STATE_SUSPENDED, WATCH_STATE_CEILOMETER_CONTROLLED
-) = (
- 'NORMAL', 'ALARM', 'NODATA',
- 'SUSPENDED', 'CEILOMETER_CONTROLLED'
-)
-
-WATCH_DATA_KEYS = (
- WATCH_DATA_ALARM, WATCH_DATA_METRIC, WATCH_DATA_TIME,
- WATCH_DATA_NAMESPACE, WATCH_DATA
-) = (
- 'watch_name', 'metric_name', 'timestamp',
- 'namespace', 'data'
-)
-
VALIDATE_PARAM_KEYS = (
PARAM_TYPE, PARAM_DEFAULT, PARAM_NO_ECHO,
PARAM_ALLOWED_VALUES, PARAM_ALLOWED_PATTERN, PARAM_MAX_LENGTH,
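
The removed WATCH_* constants, like the VALIDATE_PARAM_KEYS block visible in the context above, use Python's chained-assignment idiom: a single statement binds both the tuple of values and each individual name. A standalone illustration, mirroring the naming style of this module:

    # Chained assignment: the right-hand tuple is evaluated once, bound
    # to PARAM_KEYS, and then unpacked into the individual names.
    PARAM_KEYS = (
        PARAM_TYPE, PARAM_DEFAULT,
    ) = (
        'Type', 'Default',
    )

    assert PARAM_KEYS == ('Type', 'Default')
    assert PARAM_TYPE == 'Type'
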
diff --git a/heat/rpc/client.py b/heat/rpc/client.py
index d5b4b677b..2a6cc23c8 100644
--- a/heat/rpc/client.py
+++ b/heat/rpc/client.py
@@ -676,60 +676,6 @@ class EngineClient(object):
resource_status_reason=resource_status_reason),
version='1.26')
- def create_watch_data(self, ctxt, watch_name, stats_data):
- """Creates data for CloudWatch and WaitConditions.
-
- This could be used by CloudWatch and WaitConditions and treat HA
- service events like any other CloudWatch.
-
- :param ctxt: RPC context.
- :param watch_name: Name of the watch/alarm
- :param stats_data: The data to post.
- """
- return self.call(ctxt, self.make_msg('create_watch_data',
- watch_name=watch_name,
- stats_data=stats_data))
-
- def show_watch(self, ctxt, watch_name):
- """Returns the attributes of one watch/alarm.
-
- The show_watch method returns the attributes of one watch
- or all watches if no watch_name is passed.
-
- :param ctxt: RPC context.
- :param watch_name: Name of the watch/alarm you want to see,
- or None to see all
- """
- return self.call(ctxt, self.make_msg('show_watch',
- watch_name=watch_name))
-
- def show_watch_metric(self, ctxt, metric_namespace=None, metric_name=None):
- """Returns the datapoints for a metric.
-
- The show_watch_metric method returns the datapoints associated
- with a specified metric, or all metrics if no metric_name is passed.
-
- :param ctxt: RPC context.
- :param metric_namespace: Name of the namespace you want to see,
- or None to see all
- :param metric_name: Name of the metric you want to see,
- or None to see all
- """
- return self.call(ctxt, self.make_msg('show_watch_metric',
- metric_namespace=metric_namespace,
- metric_name=metric_name))
-
- def set_watch_state(self, ctxt, watch_name, state):
- """Temporarily set the state of a given watch.
-
- :param ctxt: RPC context.
- :param watch_name: Name of the watch
- :param state: State (must be one defined in WatchRule class)
- """
- return self.call(ctxt, self.make_msg('set_watch_state',
- watch_name=watch_name,
- state=state))
-
def get_revision(self, ctxt):
return self.call(ctxt, self.make_msg('get_revision'))
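
All of the removed watch methods follow the same RPC pattern as the surviving get_revision(): make_msg() packs a method name with its keyword arguments, and call() performs a blocking request to the engine. A minimal stand-in for the shape of that contract, not heat's actual transport code, which dispatches over oslo.messaging:

    # Stand-in showing the make_msg()/call() contract used by
    # EngineClient; a real client sends the message over oslo.messaging.
    def make_msg(method, **kwargs):
        return method, kwargs


    class FakeEngineClient(object):
        def call(self, ctxt, msg, version=None):
            method, kwargs = msg
            return {'method': method, 'args': kwargs, 'ctxt': ctxt}


    client = FakeEngineClient()
    reply = client.call({'user': 'demo'}, make_msg('get_revision'))
    assert reply['method'] == 'get_revision'
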
diff --git a/heat/scaling/cooldown.py b/heat/scaling/cooldown.py
index e3724856a..452d64ada 100644
--- a/heat/scaling/cooldown.py
+++ b/heat/scaling/cooldown.py
@@ -11,11 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
+import datetime
from heat.common import exception
from heat.common.i18n import _
from heat.engine import resource
+from oslo_log import log as logging
from oslo_utils import timeutils
import six
@@ -25,11 +26,15 @@ LOG = logging.getLogger(__name__)
class CooldownMixin(object):
"""Utility class to encapsulate Cooldown related logic.
- This class is shared between AutoScalingGroup and ScalingPolicy.
This logic includes both cooldown timestamp comparing and scaling in
progress checking.
"""
- def _check_scaling_allowed(self):
+ def _sanitize_cooldown(self, cooldown):
+ if cooldown is None:
+ return 0
+ return max(0, cooldown)
+
+ def _check_scaling_allowed(self, cooldown):
metadata = self.metadata_get()
if metadata.get('scaling_in_progress'):
LOG.info("Can not perform scaling action: resource %s "
@@ -37,51 +42,58 @@ class CooldownMixin(object):
reason = _('due to scaling activity')
raise resource.NoActionRequired(res_name=self.name,
reason=reason)
- try:
- # Negative values don't make sense, so they are clamped to zero
- cooldown = max(0, self.properties[self.COOLDOWN])
- except TypeError:
- # If not specified, it will be None, same as cooldown == 0
- cooldown = 0
+ cooldown = self._sanitize_cooldown(cooldown)
+ # if both cooldown and cooldown_end not in metadata
+ if all(k not in metadata for k in ('cooldown', 'cooldown_end')):
+ # Note: this is for supporting old version cooldown checking
+ metadata.pop('scaling_in_progress', None)
+ if metadata and cooldown != 0:
+ last_adjust = next(six.iterkeys(metadata))
+ if not timeutils.is_older_than(last_adjust, cooldown):
+ self._log_and_raise_no_action(cooldown)
- if cooldown != 0:
- try:
- if 'cooldown' not in metadata:
- # Note: this is for supporting old version cooldown logic
- if metadata:
- last_adjust = next(six.iterkeys(metadata))
- self._cooldown_check(cooldown, last_adjust)
- else:
- last_adjust = next(six.iterkeys(metadata['cooldown']))
- self._cooldown_check(cooldown, last_adjust)
- except ValueError:
- # occurs when metadata has only {scaling_in_progress: False}
- pass
+ elif 'cooldown_end' in metadata:
+ cooldown_end = next(six.iterkeys(metadata['cooldown_end']))
+ now = timeutils.utcnow().isoformat()
+ if now < cooldown_end:
+ self._log_and_raise_no_action(cooldown)
+
+ elif cooldown != 0:
+            # Note: 'cooldown' without 'cooldown_end' is the old-style
+            # format; check the stored last-adjust time against the window
+ last_adjust = next(six.iterkeys(metadata['cooldown']))
+ if not timeutils.is_older_than(last_adjust, cooldown):
+ self._log_and_raise_no_action(cooldown)
# Assumes _finished_scaling is called
# after the scaling operation completes
metadata['scaling_in_progress'] = True
self.metadata_set(metadata)
- def _cooldown_check(self, cooldown, last_adjust):
- if not timeutils.is_older_than(last_adjust, cooldown):
- LOG.info("Can not perform scaling action: "
- "resource %(name)s is in cooldown (%(cooldown)s).",
- {'name': self.name,
- 'cooldown': cooldown})
- reason = _('due to cooldown, '
- 'cooldown %s') % cooldown
- raise resource.NoActionRequired(
- res_name=self.name, reason=reason)
+ def _log_and_raise_no_action(self, cooldown):
+ LOG.info("Can not perform scaling action: "
+ "resource %(name)s is in cooldown (%(cooldown)s).",
+ {'name': self.name,
+ 'cooldown': cooldown})
+ reason = _('due to cooldown, '
+ 'cooldown %s') % cooldown
+ raise resource.NoActionRequired(
+ res_name=self.name, reason=reason)
- def _finished_scaling(self, cooldown_reason, size_changed=True):
+ def _finished_scaling(self, cooldown,
+ cooldown_reason, size_changed=True):
# If we wanted to implement the AutoScaling API like AWS does,
# we could maintain event history here, but since we only need
# the latest event for cooldown, just store that for now
metadata = self.metadata_get()
if size_changed:
- now = timeutils.utcnow().isoformat()
- metadata['cooldown'] = {now: cooldown_reason}
+ cooldown = self._sanitize_cooldown(cooldown)
+ cooldown_end = (timeutils.utcnow() + datetime.timedelta(
+ seconds=cooldown)).isoformat()
+ if 'cooldown_end' in metadata:
+ cooldown_end = max(
+ next(six.iterkeys(metadata['cooldown_end'])),
+ cooldown_end)
+ metadata['cooldown_end'] = {cooldown_end: cooldown_reason}
metadata['scaling_in_progress'] = False
try:
self.metadata_set(metadata)
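
The reworked mixin records an absolute cooldown_end timestamp when a scaling operation finishes, instead of the last-adjust time, so the check no longer has to recompute the window from the cooldown property. Comparing the ISO-8601 strings lexicographically orders the same way as the underlying naive UTC times. A standalone sketch of the bookkeeping:

    import datetime

    from oslo_utils import timeutils

    cooldown = 60
    # What _finished_scaling() now records: an absolute end time.
    cooldown_end = (timeutils.utcnow() +
                    datetime.timedelta(seconds=cooldown)).isoformat()
    metadata = {'cooldown_end': {cooldown_end: 'ChangeInCapacity : 1'},
                'scaling_in_progress': False}

    # What _check_scaling_allowed() now tests: a plain string
    # comparison of the ISO-8601 timestamps.
    now = timeutils.utcnow().isoformat()
    in_cooldown = now < next(iter(metadata['cooldown_end']))
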
diff --git a/heat/tests/api/cfn/test_api_cfn_v1.py b/heat/tests/api/cfn/test_api_cfn_v1.py
index 5845e1784..e005148dd 100644
--- a/heat/tests/api/cfn/test_api_cfn_v1.py
+++ b/heat/tests/api/cfn/test_api_cfn_v1.py
@@ -72,14 +72,11 @@ class CfnStackControllerTest(common.HeatTestCase):
return req
def _stub_enforce(self, req, action, allowed=True):
- self.m.StubOutWithMock(policy.Enforcer, 'enforce')
+ mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
if allowed:
- policy.Enforcer.enforce(req.context, action
- ).AndReturn(True)
+ mock_enforce.return_value = True
else:
- policy.Enforcer.enforce(req.context, action
- ).AndRaise(heat_exception.Forbidden)
- self.m.ReplayAll()
+ mock_enforce.side_effect = heat_exception.Forbidden
# The tests
def test_stackid_addprefix(self):
@@ -118,10 +115,8 @@ class CfnStackControllerTest(common.HeatTestCase):
dummy_req = self._dummy_GET_request(params)
dummy_req.context.roles = ['heat_stack_user']
- self.m.StubOutWithMock(policy.Enforcer, 'enforce')
- policy.Enforcer.enforce(dummy_req.context, 'ListStacks'
- ).AndRaise(AttributeError)
- self.m.ReplayAll()
+ mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
+ mock_enforce.side_effect = AttributeError
self.assertRaises(exception.HeatInternalFailureError,
self.controller._enforce, dummy_req, 'ListStacks')
@@ -512,10 +507,9 @@ class CfnStackControllerTest(common.HeatTestCase):
engine_parms, engine_args,
failure, need_stub=True):
if need_stub:
- self.m.StubOutWithMock(policy.Enforcer, 'enforce')
+ mock_enforce = self.patchobject(policy.Enforcer, 'enforce')
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- policy.Enforcer.enforce(req_context,
- 'CreateStack').AndReturn(True)
+ mock_enforce.return_value = True
# Insert an engine RPC error and ensure we map correctly to the
# heat exception type
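
This hunk is part of the mox-to-mock migration: record/replay stubs (StubOutWithMock with AndReturn/AndRaise plus ReplayAll) become patches configured through return_value and side_effect. self.patchobject is assumed here to be the HeatTestCase convenience wrapper around mock.patch.object. The equivalent pattern in plain mock:

    import mock


    class Enforcer(object):
        def enforce(self, context, action):
            raise NotImplementedError


    with mock.patch.object(Enforcer, 'enforce') as mock_enforce:
        mock_enforce.return_value = True           # the 'allowed' branch
        assert Enforcer().enforce('ctx', 'ListStacks') is True
        mock_enforce.side_effect = AttributeError  # the error branch
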
diff --git a/heat/tests/api/cloudwatch/__init__.py b/heat/tests/api/cloudwatch/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/heat/tests/api/cloudwatch/__init__.py
+++ /dev/null
diff --git a/heat/tests/api/cloudwatch/test_api_cloudwatch.py b/heat/tests/api/cloudwatch/test_api_cloudwatch.py
deleted file mode 100644
index 065737ccb..000000000
--- a/heat/tests/api/cloudwatch/test_api_cloudwatch.py
+++ /dev/null
@@ -1,539 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from oslo_config import fixture as config_fixture
-
-from heat.api.aws import exception
-import heat.api.cloudwatch.watch as watches
-from heat.common import policy
-from heat.common import wsgi
-from heat.rpc import api as rpc_api
-from heat.rpc import client as rpc_client
-from heat.tests import common
-from heat.tests import utils
-
-
-class WatchControllerTest(common.HeatTestCase):
- """Tests the API class WatchController.
-
- Tests the API class which acts as the WSGI controller,
- the endpoint processing API requests after they are routed
- """
-
- def setUp(self):
- super(WatchControllerTest, self).setUp()
- self.path = os.path.dirname(os.path.realpath(__file__))
- self.policy_path = self.path + "/../../policy/"
- self.fixture = self.useFixture(config_fixture.Config())
- self.fixture.conf(args=['--config-dir', self.policy_path])
- self.topic = rpc_api.ENGINE_TOPIC
- self.api_version = '1.0'
-
- # Create WSGI controller instance
- class DummyConfig(object):
- bind_port = 8003
- cfgopts = DummyConfig()
- self.controller = watches.WatchController(options=cfgopts)
- self.controller.policy.enforcer.policy_path = (self.policy_path +
- 'deny_stack_user.json')
- self.addCleanup(self.m.VerifyAll)
-
- def _dummy_GET_request(self, params=None):
- # Mangle the params dict into a query string
- params = params or {}
- qs = "&".join(["=".join([k, str(params[k])]) for k in params])
- environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': qs}
- req = wsgi.Request(environ)
- req.context = utils.dummy_context()
- return req
-
- # The tests
- def test_reformat_dimensions(self):
-
- dims = [{'StackId': u'21617058-781e-4262-97ab-5f9df371ee52',
- 'Foo': 'bar'}]
- self.assertEqual(
- [{'Name': 'Foo',
- 'Value': 'bar'},
- {'Name': 'StackId',
- 'Value': u'21617058-781e-4262-97ab-5f9df371ee52'}],
- sorted((self.controller._reformat_dimensions(dims)),
- key=lambda k: k['Name']))
-
- def test_enforce_default(self):
- self.m.ReplayAll()
- params = {'Action': 'ListMetrics'}
- dummy_req = self._dummy_GET_request(params)
- self.controller.policy.policy_path = None
- response = self.controller._enforce(dummy_req, 'ListMetrics')
- self.assertIsNone(response)
- self.m.VerifyAll()
-
- def test_enforce_denied(self):
- self.m.ReplayAll()
- params = {'Action': 'ListMetrics'}
- dummy_req = self._dummy_GET_request(params)
- dummy_req.context.roles = ['heat_stack_user']
- self.controller.policy.policy_path = (self.policy_path +
- 'deny_stack_user.json')
- self.assertRaises(exception.HeatAccessDeniedError,
- self.controller._enforce, dummy_req, 'ListMetrics')
- self.m.VerifyAll()
-
- def test_enforce_ise(self):
- params = {'Action': 'ListMetrics'}
- dummy_req = self._dummy_GET_request(params)
- dummy_req.context.roles = ['heat_stack_user']
-
- self.m.StubOutWithMock(policy.Enforcer, 'enforce')
- policy.Enforcer.enforce(dummy_req.context, 'ListMetrics'
- ).AndRaise(AttributeError)
- self.m.ReplayAll()
-
- self.controller.policy.policy_path = (self.policy_path +
- 'deny_stack_user.json')
- self.assertRaises(exception.HeatInternalFailureError,
- self.controller._enforce, dummy_req, 'ListMetrics')
- self.m.VerifyAll()
-
- def test_delete(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'DeleteAlarms'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.delete_alarms(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_describe_alarm_history(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'DescribeAlarmHistory'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.describe_alarm_history(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_describe_all(self):
- watch_name = None # Get all watches
-
- # Format a dummy GET request to pass into the WSGI handler
- params = {'Action': 'DescribeAlarms'}
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to the engine with a pre-canned response
- engine_resp = [{u'state_updated_time': u'2012-08-30T14:13:21Z',
- u'stack_id': u'21617058-781e-4262-97ab-5f9df371ee52',
- u'period': u'300',
- u'actions': [u'WebServerRestartPolicy'],
- u'topic': None,
- u'periods': u'1',
- u'statistic': u'SampleCount',
- u'threshold': u'2',
- u'unit': None,
- u'state_reason': None,
- u'dimensions': [],
- u'namespace': u'system/linux',
- u'state_value': u'NORMAL',
- u'ok_actions': None,
- u'description': u'Restart the WikiDatabase',
- u'actions_enabled': None,
- u'state_reason_data': None,
- u'insufficient_actions': None,
- u'metric_name': u'ServiceFailure',
- u'comparison': u'GreaterThanThreshold',
- u'name': u'HttpFailureAlarm',
- u'updated_time': u'2012-08-30T14:10:46Z'}]
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('show_watch', {'watch_name': watch_name})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'DescribeAlarmsResponse': {'DescribeAlarmsResult':
- {'MetricAlarms': [
- {'EvaluationPeriods': u'1',
- 'StateReasonData': None,
- 'AlarmArn': None,
- 'StateUpdatedTimestamp': u'2012-08-30T14:13:21Z',
- 'AlarmConfigurationUpdatedTimestamp':
- u'2012-08-30T14:10:46Z',
- 'AlarmActions': [u'WebServerRestartPolicy'],
- 'Threshold': u'2',
- 'AlarmDescription': u'Restart the WikiDatabase',
- 'Namespace': u'system/linux',
- 'Period': u'300',
- 'StateValue': u'NORMAL',
- 'ComparisonOperator': u'GreaterThanThreshold',
- 'AlarmName': u'HttpFailureAlarm',
- 'Unit': None,
- 'Statistic': u'SampleCount',
- 'StateReason': None,
- 'InsufficientDataActions': None,
- 'OKActions': None,
- 'MetricName': u'ServiceFailure',
- 'ActionsEnabled': None,
- 'Dimensions':
- [{'Name': 'StackId',
- 'Value': u'21617058-781e-4262-97ab-5f9df371ee52'}]
- }]}}}
-
- # Call the list controller function and compare the response
- self.assertEqual(expected, self.controller.describe_alarms(dummy_req))
-
- def test_describe_alarms_for_metric(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'DescribeAlarmsForMetric'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.describe_alarms_for_metric(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_disable_alarm_actions(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'DisableAlarmActions'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.disable_alarm_actions(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_enable_alarm_actions(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'EnableAlarmActions'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.enable_alarm_actions(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_get_metric_statistics(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'GetMetricStatistics'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.get_metric_statistics(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_list_metrics_all(self):
- params = {'Action': 'ListMetrics'}
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to the engine with a pre-canned response
- # We dummy three different metrics and namespaces to test
- # filtering by parameter
- engine_resp = [{u'timestamp': u'2012-08-30T15:09:02Z',
- u'watch_name': u'HttpFailureAlarm',
- u'namespace': u'system/linux',
- u'metric_name': u'ServiceFailure',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:10:03Z',
- u'watch_name': u'HttpFailureAlarm2',
- u'namespace': u'system/linux2',
- u'metric_name': u'ServiceFailure2',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:16:03Z',
- u'watch_name': u'HttpFailureAlar3m',
- u'namespace': u'system/linux3',
- u'metric_name': u'ServiceFailure3',
- u'data': {u'Units': u'Counter', u'Value': 1}}]
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- # Current engine implementation means we filter in the API
- # and pass None/None for namespace/watch_name which returns
- # all metric data which we post-process in the API
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('show_watch_metric',
- {'metric_namespace': None, 'metric_name': None})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'ListMetricsResponse':
- {'ListMetricsResult':
- {'Metrics': [{'Namespace': u'system/linux',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlarm'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:09:02Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure'},
- {'Namespace': u'system/linux2',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlarm2'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:10:03Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure2'},
- {'Namespace': u'system/linux3',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlar3m'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:16:03Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure3'}]}}}
-
- response = self.controller.list_metrics(dummy_req)
- metrics = (response['ListMetricsResponse']['ListMetricsResult']
- ['Metrics'])
- metrics[0]['Dimensions'] = sorted(
- metrics[0]['Dimensions'], key=lambda k: k['Name'])
- metrics[1]['Dimensions'] = sorted(
- metrics[1]['Dimensions'], key=lambda k: k['Name'])
- metrics[2]['Dimensions'] = sorted(
- metrics[2]['Dimensions'], key=lambda k: k['Name'])
- metrics = sorted(metrics, key=lambda k: k['MetricName'])
- response['ListMetricsResponse']['ListMetricsResult'] = (
- {'Metrics': metrics})
-        # First pass, no query-parameter filtering, so we get all three
- self.assertEqual(expected, response)
-
- def test_list_metrics_filter_name(self):
-
- # Add a MetricName filter, so we should only get one of the three
- params = {'Action': 'ListMetrics',
- 'MetricName': 'ServiceFailure'}
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to the engine with a pre-canned response
- # We dummy three different metrics and namespaces to test
- # filtering by parameter
- engine_resp = [{u'timestamp': u'2012-08-30T15:09:02Z',
- u'watch_name': u'HttpFailureAlarm',
- u'namespace': u'system/linux',
- u'metric_name': u'ServiceFailure',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:10:03Z',
- u'watch_name': u'HttpFailureAlarm2',
- u'namespace': u'system/linux2',
- u'metric_name': u'ServiceFailure2',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:16:03Z',
- u'watch_name': u'HttpFailureAlar3m',
- u'namespace': u'system/linux3',
- u'metric_name': u'ServiceFailure3',
- u'data': {u'Units': u'Counter', u'Value': 1}}]
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- # Current engine implementation means we filter in the API
- # and pass None/None for namespace/watch_name which returns
- # all metric data which we post-process in the API
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('show_watch_metric',
- {'metric_namespace': None, 'metric_name': None})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'ListMetricsResponse':
- {'ListMetricsResult':
- {'Metrics':
- [{'Namespace': u'system/linux',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlarm'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:09:02Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure'}]}}}
- response = self.controller.list_metrics(dummy_req)
- metrics = (response['ListMetricsResponse']['ListMetricsResult']
- ['Metrics'])
- metrics[0]['Dimensions'] = sorted(
- metrics[0]['Dimensions'], key=lambda k: k['Name'])
- response['ListMetricsResponse']['ListMetricsResult'] = (
- {'Metrics': metrics})
-        # With the MetricName filter applied we get only the one match
- self.assertEqual(expected, response)
-
- def test_list_metrics_filter_namespace(self):
-
- # Add a Namespace filter and change the engine response so
- # we should get two responses
- params = {'Action': 'ListMetrics',
- 'Namespace': 'atestnamespace/foo'}
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to the engine with a pre-canned response
- # We dummy three different metrics and namespaces to test
- # filtering by parameter
- engine_resp = [{u'timestamp': u'2012-08-30T15:09:02Z',
- u'watch_name': u'HttpFailureAlarm',
- u'namespace': u'atestnamespace/foo',
- u'metric_name': u'ServiceFailure',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:10:03Z',
- u'watch_name': u'HttpFailureAlarm2',
- u'namespace': u'atestnamespace/foo',
- u'metric_name': u'ServiceFailure2',
- u'data': {u'Units': u'Counter', u'Value': 1}},
-
- {u'timestamp': u'2012-08-30T15:16:03Z',
- u'watch_name': u'HttpFailureAlar3m',
- u'namespace': u'system/linux3',
- u'metric_name': u'ServiceFailure3',
- u'data': {u'Units': u'Counter', u'Value': 1}}]
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- # Current engine implementation means we filter in the API
- # and pass None/None for namespace/watch_name which returns
- # all metric data which we post-process in the API
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('show_watch_metric',
- {'metric_namespace': None, 'metric_name': None})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'ListMetricsResponse':
- {'ListMetricsResult':
- {'Metrics':
- [{'Namespace': u'atestnamespace/foo',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlarm'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:09:02Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure'},
- {'Namespace': u'atestnamespace/foo',
- 'Dimensions':
- [{'Name': 'AlarmName',
- 'Value': u'HttpFailureAlarm2'},
- {'Name': 'Timestamp',
- 'Value': u'2012-08-30T15:10:03Z'},
- {'Name': u'Units',
- 'Value': u'Counter'},
- {'Name': u'Value',
- 'Value': 1}],
- 'MetricName': u'ServiceFailure2'}]}}}
- response = self.controller.list_metrics(dummy_req)
- metrics = (response['ListMetricsResponse']['ListMetricsResult']
- ['Metrics'])
- metrics[0]['Dimensions'] = sorted(
- metrics[0]['Dimensions'], key=lambda k: k['Name'])
- metrics[1]['Dimensions'] = sorted(
- metrics[1]['Dimensions'], key=lambda k: k['Name'])
- response['ListMetricsResponse']['ListMetricsResult'] = (
- {'Metrics': metrics})
-        # With the Namespace filter applied we get the two matches
- self.assertEqual(expected, response)
-
- def test_put_metric_alarm(self):
- # Not yet implemented, should raise HeatAPINotImplementedError
- params = {'Action': 'PutMetricAlarm'}
- dummy_req = self._dummy_GET_request(params)
- result = self.controller.put_metric_alarm(dummy_req)
- self.assertIsInstance(result, exception.HeatAPINotImplementedError)
-
- def test_put_metric_data(self):
-
- params = {u'Namespace': u'system/linux',
- u'MetricData.member.1.Unit': u'Count',
- u'MetricData.member.1.Value': u'1',
- u'MetricData.member.1.MetricName': u'ServiceFailure',
- u'MetricData.member.1.Dimensions.member.1.Name':
- u'AlarmName',
- u'MetricData.member.1.Dimensions.member.1.Value':
- u'HttpFailureAlarm',
- u'Action': u'PutMetricData'}
-
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to verify the engine call parameters
- engine_resp = {}
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('create_watch_data',
- {'watch_name': u'HttpFailureAlarm',
- 'stats_data': {
- 'Namespace': u'system/linux',
- 'ServiceFailure': {
- 'Value': u'1', 'Unit': u'Count', 'Dimensions': []}}})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'PutMetricDataResponse': {'PutMetricDataResult':
- {'ResponseMetadata': None}}}
- self.assertEqual(expected, self.controller.put_metric_data(dummy_req))
-
- def test_set_alarm_state(self):
- state_map = {'OK': rpc_api.WATCH_STATE_OK,
- 'ALARM': rpc_api.WATCH_STATE_ALARM,
- 'INSUFFICIENT_DATA': rpc_api.WATCH_STATE_NODATA}
-
- for state in state_map:
- params = {u'StateValue': state,
- u'StateReason': u'',
- u'AlarmName': u'HttpFailureAlarm',
- u'Action': u'SetAlarmState'}
-
- dummy_req = self._dummy_GET_request(params)
-
- # Stub out the RPC call to verify the engine call parameters
- # The real engine response is the same as show_watch but with
- # the state overridden, but since the API doesn't make use
- # of the response at present we pass nothing back from the stub
- engine_resp = {}
-
- self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
- rpc_client.EngineClient.call(
- dummy_req.context,
- ('set_watch_state',
- {'state': state_map[state],
- 'watch_name': u'HttpFailureAlarm'})
- ).AndReturn(engine_resp)
-
- self.m.ReplayAll()
-
- expected = {'SetAlarmStateResponse': {'SetAlarmStateResult': ''}}
- self.assertEqual(expected,
- self.controller.set_alarm_state(dummy_req))
-
- self.m.UnsetStubs()
- self.m.VerifyAll()
-
- def test_set_alarm_state_badstate(self):
- params = {u'StateValue': "baaaaad",
- u'StateReason': u'',
- u'AlarmName': u'HttpFailureAlarm',
- u'Action': u'SetAlarmState'}
- dummy_req = self._dummy_GET_request(params)
-
- # should raise HeatInvalidParameterValueError
- result = self.controller.set_alarm_state(dummy_req)
- self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
diff --git a/heat/tests/api/openstack_v1/test_stacks.py b/heat/tests/api/openstack_v1/test_stacks.py
index f7a9b7686..8b0593918 100644
--- a/heat/tests/api/openstack_v1/test_stacks.py
+++ b/heat/tests/api/openstack_v1/test_stacks.py
@@ -473,7 +473,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
- is_registered_policy=False,
+ is_registered_policy=True,
context=self.context)
def test_global_index_uses_admin_context(self, mock_enforce):
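
The expected kwarg flips because stack policies are now registered in code: with is_registered_policy=True, heat's enforcer can validate the rule name against the registered defaults rather than relying only on a policy.json lookup. In oslo.policy terms the flag selects between authorize() and enforce(); a hypothetical wrapper (names illustrative, not heat's actual signature):

    def check(enforcer, rule, target, creds, is_registered_policy=False):
        if is_registered_policy:
            # authorize() raises PolicyNotRegistered for unknown rules.
            return enforcer.authorize(rule, target, creds)
        # enforce() also accepts rules defined only in the policy file.
        return enforcer.enforce(rule, target, creds)
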
diff --git a/heat/tests/autoscaling/test_heat_scaling_group.py b/heat/tests/autoscaling/test_heat_scaling_group.py
index e98a0753b..8a107ec25 100644
--- a/heat/tests/autoscaling/test_heat_scaling_group.py
+++ b/heat/tests/autoscaling/test_heat_scaling_group.py
@@ -10,9 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import json
import mock
+from oslo_utils import timeutils
import six
from heat.common import exception
@@ -42,6 +44,13 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
self.assertRaises(exception.StackValidationFailed,
stack['my-group'].validate)
+ def test_validate_reference_attr_with_none_ref(self):
+ stack = utils.parse_stack(self.parsed)
+ group = stack['my-group']
+ self.patchobject(group, 'referenced_attrs',
+ return_value=set([('something', None)]))
+ self.assertIsNone(group.validate())
+
class TestScalingGroupTags(common.HeatTestCase):
def setUp(self):
@@ -109,6 +118,15 @@ class TestGroupAdjust(common.HeatTestCase):
self.stub_SnapshotConstraint_validate()
self.assertIsNone(self.group.validate())
+ def test_group_metadata_reset(self):
+ self.group.state_set('CREATE', 'COMPLETE')
+ metadata = {'scaling_in_progress': True}
+ self.group.metadata_set(metadata)
+ self.group.handle_metadata_reset()
+
+ new_metadata = self.group.metadata_get()
+ self.assertEqual({'scaling_in_progress': False}, new_metadata)
+
def test_scaling_policy_cooldown_toosoon(self):
dont_call = self.patchobject(self.group, 'resize')
self.patchobject(self.group, '_check_scaling_allowed',
@@ -159,6 +177,7 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
finished_scaling.assert_called_once_with(
+ None,
'PercentChangeInCapacity : 33',
size_changed=True)
@@ -190,6 +209,7 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
finished_scaling.assert_called_once_with(
+ None,
'PercentChangeInCapacity : -33',
size_changed=True)
@@ -218,7 +238,8 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
- finished_scaling.assert_called_once_with('ChangeInCapacity : 1',
+ finished_scaling.assert_called_once_with(None,
+ 'ChangeInCapacity : 1',
size_changed=True)
grouputils.get_size.assert_called_once_with(self.group)
@@ -379,6 +400,101 @@ class HeatScalingGroupAttrTest(common.HeatTestCase):
self.assertRaises(exception.InvalidTemplateAttribute,
self.group.FnGetAtt, 'InstanceList')
+ def _stub_get_attr(self, refids, attrs):
+ def ref_id_fn(res_name):
+ return refids[res_name]
+
+ def attr_fn(args):
+ res_name = args[0]
+ return attrs[res_name]
+
+ inspector = self.group._group_data()
+ member_names = sorted(refids if refids else attrs)
+ self.patchobject(inspector, 'member_names', return_value=member_names)
+
+ def get_output(output_name):
+ outputs = self.group._nested_output_defns(member_names,
+ attr_fn, ref_id_fn)
+ op_defns = {od.name: od for od in outputs}
+ self.assertIn(output_name, op_defns)
+ return op_defns[output_name].get_value()
+
+ orig_get_attr = self.group.FnGetAtt
+
+ def get_attr(attr_name, *path):
+ if not path:
+ attr = attr_name
+ else:
+ attr = (attr_name,) + path
+            # Mock referenced_attrs() so that _nested_output_defns()
+ # will include the output required for this attribute
+ self.group.referenced_attrs = mock.Mock(return_value=[attr])
+
+ # Pass through to actual function under test
+ return orig_get_attr(attr_name, *path)
+
+ self.group.FnGetAtt = mock.Mock(side_effect=get_attr)
+ self.group.get_output = mock.Mock(side_effect=get_output)
+
+ def test_output_attribute_list(self):
+ values = {str(i): '2.1.3.%d' % i for i in range(1, 4)}
+ self._stub_get_attr({n: 'foo' for n in values}, values)
+
+ expected = [v for k, v in sorted(values.items())]
+ self.assertEqual(expected, self.group.FnGetAtt('outputs_list', 'Bar'))
+
+ def test_output_attribute_dict(self):
+ values = {str(i): '2.1.3.%d' % i for i in range(1, 4)}
+ self._stub_get_attr({n: 'foo' for n in values}, values)
+
+ self.assertEqual(values, self.group.FnGetAtt('outputs', 'Bar'))
+
+ def test_index_dotted_attribute(self):
+ values = {'ab'[i - 1]: '2.1.3.%d' % i for i in range(1, 3)}
+ self._stub_get_attr({'a': 'foo', 'b': 'bar'}, values)
+
+ self.assertEqual(values['a'], self.group.FnGetAtt('resource.0', 'Bar'))
+ self.assertEqual(values['b'], self.group.FnGetAtt('resource.1.Bar'))
+ self.assertRaises(exception.NotFound,
+ self.group.FnGetAtt, 'resource.2')
+
+ def test_output_refs(self):
+ values = {'abc': 'resource-1', 'def': 'resource-2'}
+ self._stub_get_attr(values, {})
+
+ expected = [v for k, v in sorted(values.items())]
+ self.assertEqual(expected, self.group.FnGetAtt('refs'))
+
+ def test_output_refs_map(self):
+ values = {'abc': 'resource-1', 'def': 'resource-2'}
+ self._stub_get_attr(values, {})
+
+ self.assertEqual(values, self.group.FnGetAtt('refs_map'))
+
+ def test_attribute_current_size(self):
+ mock_instances = self.patchobject(grouputils, 'get_size')
+ mock_instances.return_value = 3
+ self.assertEqual(3, self.group.FnGetAtt('current_size'))
+
+ def test_attribute_current_size_with_path(self):
+ mock_instances = self.patchobject(grouputils, 'get_size')
+ mock_instances.return_value = 4
+ self.assertEqual(4, self.group.FnGetAtt('current_size', 'name'))
+
+
+class HeatScalingGroupAttrFallbackTest(common.HeatTestCase):
+ def setUp(self):
+ super(HeatScalingGroupAttrFallbackTest, self).setUp()
+
+ t = template_format.parse(inline_templates.as_heat_template)
+ self.stack = utils.parse_stack(t, params=inline_templates.as_params)
+ self.group = self.stack['my-group']
+ self.assertIsNone(self.group.validate())
+
+ # Raise NotFound when getting output, to force fallback to old-school
+ # grouputils functions
+ self.group.get_output = mock.Mock(side_effect=exception.NotFound)
+
def test_output_attribute_list(self):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
@@ -438,16 +554,6 @@ class HeatScalingGroupAttrTest(common.HeatTestCase):
self.assertEqual(output,
self.group.FnGetAtt('outputs', 'Bar'))
- def test_attribute_current_size(self):
- mock_instances = self.patchobject(grouputils, 'get_size')
- mock_instances.return_value = 3
- self.assertEqual(3, self.group.FnGetAtt('current_size'))
-
- def test_attribute_current_size_with_path(self):
- mock_instances = self.patchobject(grouputils, 'get_size')
- mock_instances.return_value = 4
- self.assertEqual(4, self.group.FnGetAtt('current_size', 'name'))
-
def test_index_dotted_attribute(self):
mock_members = self.patchobject(grouputils, 'get_members')
self.group.nested = mock.Mock()
@@ -455,7 +561,7 @@ class HeatScalingGroupAttrTest(common.HeatTestCase):
output = []
for ip_ex in six.moves.range(0, 2):
inst = mock.Mock()
- inst.name = str(ip_ex)
+ inst.name = 'ab'[ip_ex]
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
members.append(inst)
@@ -645,3 +751,122 @@ class IncorrectUpdatePolicyTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property RollingUpdate', six.text_type(exc))
+
+
+class TestCooldownMixin(common.HeatTestCase):
+ def setUp(self):
+ super(TestCooldownMixin, self).setUp()
+ t = template_format.parse(inline_templates.as_heat_template)
+ self.stack = utils.parse_stack(t, params=inline_templates.as_params)
+ self.stack.store()
+ self.group = self.stack['my-group']
+ self.group.state_set('CREATE', 'COMPLETE')
+
+ def test_cooldown_is_in_progress_toosoon(self):
+ cooldown_end = timeutils.utcnow() + datetime.timedelta(seconds=60)
+ previous_meta = {'cooldown_end': {
+ cooldown_end.isoformat(): 'change_in_capacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_cooldown_is_in_progress_toosoon_legacy(self):
+ now = timeutils.utcnow()
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'change_in_capacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_cooldown_is_in_progress_scaling_unfinished(self):
+ previous_meta = {'scaling_in_progress': True}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_cooldown_not_in_progress_legacy(self):
+ awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
+ previous_meta = {
+ 'cooldown': {
+ awhile_ago.isoformat(): 'change_in_capacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+
+ def test_cooldown_not_in_progress(self):
+ awhile_after = timeutils.utcnow() + datetime.timedelta(seconds=60)
+ previous_meta = {
+ 'cooldown_end': {
+ awhile_after.isoformat(): 'change_in_capacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ timeutils.set_time_override()
+ timeutils.advance_time_seconds(100)
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+ timeutils.clear_time_override()
+
+ def test_scaling_policy_cooldown_zero(self):
+ now = timeutils.utcnow()
+ previous_meta = {'cooldown_end': {
+ now.isoformat(): 'change_in_capacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(0))
+
+ def test_scaling_policy_cooldown_none(self):
+ now = timeutils.utcnow()
+ previous_meta = {'cooldown_end': {
+ now.isoformat(): 'change_in_capacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(None))
+
+ def test_no_cooldown_no_scaling_in_progress(self):
+ # no cooldown entry in the metadata
+ awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
+ previous_meta = {'scaling_in_progress': False,
+ awhile_ago.isoformat(): 'change_in_capacity : 1'}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+
+ def test_metadata_is_written(self):
+ nowish = timeutils.utcnow()
+ reason = 'cool as'
+ meta_set = self.patchobject(self.group, 'metadata_set')
+ self.patchobject(timeutils, 'utcnow', return_value=nowish)
+ self.group._finished_scaling(60, reason)
+ cooldown_end = nowish + datetime.timedelta(seconds=60)
+ meta_set.assert_called_once_with(
+ {'cooldown_end': {cooldown_end.isoformat(): reason},
+ 'scaling_in_progress': False})
+
+ def test_metadata_is_written_update(self):
+ nowish = timeutils.utcnow()
+ reason = 'cool as'
+ prev_cooldown_end = nowish + datetime.timedelta(seconds=100)
+ previous_meta = {
+ 'cooldown_end': {
+ prev_cooldown_end.isoformat(): 'change_in_capacity : 1'
+ }
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ meta_set = self.patchobject(self.group, 'metadata_set')
+ self.patchobject(timeutils, 'utcnow', return_value=nowish)
+ self.group._finished_scaling(60, reason)
+ meta_set.assert_called_once_with(
+ {'cooldown_end': {prev_cooldown_end.isoformat(): reason},
+ 'scaling_in_progress': False})
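
The new TestCooldownMixin cases drive the clock with the oslo.utils time-control helpers rather than real waits: set_time_override() freezes utcnow(), advance_time_seconds() moves the frozen clock forward, and clear_time_override() restores real time. A self-contained illustration of those helpers:

    from oslo_utils import timeutils

    timeutils.set_time_override()          # freeze utcnow()
    start = timeutils.utcnow()
    timeutils.advance_time_seconds(100)    # move the frozen clock
    elapsed = timeutils.utcnow() - start
    assert elapsed.total_seconds() == 100
    timeutils.clear_time_override()        # back to the real clock
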
diff --git a/heat/tests/autoscaling/test_heat_scaling_policy.py b/heat/tests/autoscaling/test_heat_scaling_policy.py
index 65fb7695c..dd831379d 100644
--- a/heat/tests/autoscaling/test_heat_scaling_policy.py
+++ b/heat/tests/autoscaling/test_heat_scaling_policy.py
@@ -11,10 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
-
import mock
-from oslo_utils import timeutils
import six
from heat.common import exception
@@ -69,8 +66,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
def test_scaling_policy_bad_group(self):
t = template_format.parse(inline_templates.as_heat_template_bad_group)
stack = utils.parse_stack(t)
- up_policy = self.create_scaling_policy(t, stack,
- 'my-policy')
+ up_policy = self.create_scaling_policy(t, stack, 'my-policy')
ex = self.assertRaises(exception.ResourceFailure, up_policy.signal)
self.assertIn('Alarm my-policy could '
@@ -79,33 +75,28 @@ class TestAutoScalingPolicy(common.HeatTestCase):
def test_scaling_policy_adjust_no_action(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
- up_policy = self.create_scaling_policy(t, stack,
- 'my-policy')
+ up_policy = self.create_scaling_policy(t, stack, 'my-policy')
group = stack['my-group']
self.patchobject(group, 'adjust',
side_effect=resource.NoActionRequired())
- mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
- with mock.patch.object(up_policy,
- '_check_scaling_allowed') as mock_isa:
- self.assertRaises(resource.NoActionRequired,
- up_policy.handle_signal)
- mock_isa.assert_called_once_with()
- mock_fin_scaling.assert_called_once_with('change_in_capacity : 1',
- size_changed=False)
+ self.assertRaises(resource.NoActionRequired,
+ up_policy.handle_signal)
def test_scaling_policy_adjust_size_changed(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
- up_policy = self.create_scaling_policy(t, stack,
- 'my-policy')
+ up_policy = self.create_scaling_policy(t, stack, 'my-policy')
group = stack['my-group']
- self.patchobject(group, 'adjust')
- mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
- with mock.patch.object(up_policy,
+ self.patchobject(group, 'resize')
+ self.patchobject(group, '_lb_reload')
+ mock_fin_scaling = self.patchobject(group, '_finished_scaling')
+
+ with mock.patch.object(group,
'_check_scaling_allowed') as mock_isa:
self.assertIsNone(up_policy.handle_signal())
- mock_isa.assert_called_once_with()
- mock_fin_scaling.assert_called_once_with('change_in_capacity : 1',
+ mock_isa.assert_called_once_with(60)
+ mock_fin_scaling.assert_called_once_with(60,
+ 'change_in_capacity : 1',
size_changed=True)
def test_scaling_policy_cooldown_toosoon(self):
@@ -115,39 +106,27 @@ class TestAutoScalingPolicy(common.HeatTestCase):
group = stack['my-group']
test = {'current': 'alarm'}
- with mock.patch.object(group, 'adjust',
- side_effect=AssertionError) as dont_call:
- with mock.patch.object(
- pol, '_check_scaling_allowed',
- side_effect=resource.NoActionRequired) as mock_cip:
- self.assertRaises(resource.NoActionRequired,
- pol.handle_signal, details=test)
- mock_cip.assert_called_once_with()
- self.assertEqual([], dont_call.call_args_list)
-
- def test_policy_metadata_reset(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
- metadata = {'scaling_in_progress': True}
- pol.metadata_set(metadata)
- pol.handle_metadata_reset()
-
- new_metadata = pol.metadata_get()
- self.assertEqual({'scaling_in_progress': False}, new_metadata)
+ with mock.patch.object(
+ group, '_check_scaling_allowed',
+ side_effect=resource.NoActionRequired) as mock_cip:
+ self.assertRaises(resource.NoActionRequired,
+ pol.handle_signal, details=test)
+ mock_cip.assert_called_once_with(60)
def test_scaling_policy_cooldown_ok(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'my-policy')
+ group = stack['my-group']
test = {'current': 'alarm'}
+ self.patchobject(group, '_finished_scaling')
+ self.patchobject(group, '_lb_reload')
+ mock_resize = self.patchobject(group, 'resize')
- group = self.patchobject(pol.stack, 'resource_by_refid').return_value
- group.name = 'fluffy'
- with mock.patch.object(pol, '_check_scaling_allowed') as mock_isa:
+ with mock.patch.object(group, '_check_scaling_allowed') as mock_isa:
pol.handle_signal(details=test)
- mock_isa.assert_called_once_with()
- group.adjust.assert_called_once_with(1, 'change_in_capacity', None)
+ mock_isa.assert_called_once_with(60)
+ mock_resize.assert_called_once_with(1)
def test_scaling_policy_refid(self):
t = template_format.parse(as_template)
@@ -170,111 +149,6 @@ class TestAutoScalingPolicy(common.HeatTestCase):
self.assertEqual('convg_xyz', rsrc.FnGetRefId())
-class TestCooldownMixin(common.HeatTestCase):
- def create_scaling_policy(self, t, stack, resource_name):
- rsrc = stack[resource_name]
- self.assertIsNone(rsrc.validate())
- scheduler.TaskRunner(rsrc.create)()
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- return rsrc
-
- def test_cooldown_is_in_progress_toosoon(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- now = timeutils.utcnow()
- previous_meta = {'cooldown': {
- now.isoformat(): 'change_in_capacity : 1'}}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- ex = self.assertRaises(resource.NoActionRequired,
- pol._check_scaling_allowed)
- self.assertIn('due to cooldown', six.text_type(ex))
-
- def test_cooldown_is_in_progress_scaling_unfinished(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- previous_meta = {'scaling_in_progress': True}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- ex = self.assertRaises(resource.NoActionRequired,
- pol._check_scaling_allowed)
- self.assertIn('due to scaling activity', six.text_type(ex))
-
- def test_cooldown_not_in_progress(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
- previous_meta = {
- 'cooldown': {
- awhile_ago.isoformat(): 'change_in_capacity : 1'
- },
- 'scaling_in_progress': False
- }
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_scaling_policy_cooldown_zero(self):
- t = template_format.parse(as_template)
-
- # Create the scaling policy (with cooldown=0) and scale up one
- properties = t['resources']['my-policy']['properties']
- properties['cooldown'] = '0'
-
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- now = timeutils.utcnow()
- previous_meta = {'cooldown': {
- now.isoformat(): 'change_in_capacity : 1'}}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_scaling_policy_cooldown_none(self):
- t = template_format.parse(as_template)
-
- # Create the scaling policy no cooldown property, should behave the
- # same as when cooldown==0
- properties = t['resources']['my-policy']['properties']
- del properties['cooldown']
-
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- now = timeutils.utcnow()
- previous_meta = {'cooldown': {
- now.isoformat(): 'change_in_capacity : 1'}}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_no_cooldown_no_scaling_in_progress(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- # no cooldown entry in the metadata
- previous_meta = {'scaling_in_progress': False}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_metadata_is_written(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'my-policy')
-
- nowish = timeutils.utcnow()
- reason = 'cool as'
- meta_set = self.patchobject(pol, 'metadata_set')
- self.patchobject(timeutils, 'utcnow', return_value=nowish)
- pol._finished_scaling(reason, size_changed=True)
- meta_set.assert_called_once_with(
- {'cooldown': {nowish.isoformat(): reason},
- 'scaling_in_progress': False})
-
-
class ScalingPolicyAttrTest(common.HeatTestCase):
def setUp(self):
super(ScalingPolicyAttrTest, self).setUp()
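
The hunks above pin down the reworked cooldown contract: _check_scaling_allowed() now takes the cooldown period as an argument, and the resource metadata records an explicit 'cooldown_end' timestamp rather than the start time of the last action (the legacy 'cooldown' key is still honoured). A minimal sketch of the check these assertions imply; key and exception names are taken from the tests rather than the engine source, so treat it as an approximation:

    import datetime

    from oslo_utils import timeutils


    class NoActionRequired(Exception):
        """Stand-in for heat.engine.resource.NoActionRequired."""


    def check_scaling_allowed(metadata, cooldown):
        """Raise NoActionRequired unless a new scaling action may start.

        metadata mirrors what the tests feed metadata_get():
        {'scaling_in_progress': bool,
         'cooldown_end': {iso_timestamp: reason}}          # new format
        or {'cooldown': {iso_timestamp_of_last_action: reason}}  # legacy
        """
        if metadata.get('scaling_in_progress'):
            raise NoActionRequired('due to scaling activity')
        if 'cooldown_end' in metadata:
            end_ts = next(iter(metadata['cooldown_end']))
            end = timeutils.parse_isotime(end_ts).replace(tzinfo=None)
        elif 'cooldown' in metadata and cooldown:
            # Legacy entries stamp when the last action started, so the
            # end time must be derived from the current cooldown value.
            start_ts = next(iter(metadata['cooldown']))
            end = (timeutils.parse_isotime(start_ts).replace(tzinfo=None)
                   + datetime.timedelta(seconds=cooldown))
        else:
            return
        if timeutils.utcnow() < end:
            raise NoActionRequired('due to cooldown')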
diff --git a/heat/tests/autoscaling/test_scaling_group.py b/heat/tests/autoscaling/test_scaling_group.py
index 233cf5076..90b155fd0 100644
--- a/heat/tests/autoscaling/test_scaling_group.py
+++ b/heat/tests/autoscaling/test_scaling_group.py
@@ -11,9 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import json
import mock
+from oslo_utils import timeutils
import six
from heat.common import exception
@@ -357,6 +359,7 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
finished_scaling.assert_called_once_with(
+ None,
'PercentChangeInCapacity : 33',
size_changed=True)
@@ -388,6 +391,7 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(3)
finished_scaling.assert_called_once_with(
+ None,
'PercentChangeInCapacity : -33',
size_changed=True)
@@ -416,7 +420,8 @@ class TestGroupAdjust(common.HeatTestCase):
self.assertEqual(expected_notifies, notify.call_args_list)
resize.assert_called_once_with(1)
- finished_scaling.assert_called_once_with('ChangeInCapacity : 1',
+ finished_scaling.assert_called_once_with(None,
+ 'ChangeInCapacity : 1',
size_changed=True)
grouputils.get_size.assert_called_once_with(self.group)
@@ -722,3 +727,121 @@ class RollingUpdatePolicyDiffTest(common.HeatTestCase):
def test_update_policy_removed(self):
self.validate_update_policy_diff(asg_tmpl_with_updt_policy(),
inline_templates.as_template)
+
+
+class TestCooldownMixin(common.HeatTestCase):
+ def setUp(self):
+ super(TestCooldownMixin, self).setUp()
+ t = template_format.parse(inline_templates.as_template)
+ self.stack = utils.parse_stack(t, params=inline_templates.as_params)
+ self.stack.store()
+ self.group = self.stack['WebServerGroup']
+ self.group.state_set('CREATE', 'COMPLETE')
+
+ def test_cooldown_is_in_progress_toosoon(self):
+ cooldown_end = timeutils.utcnow() + datetime.timedelta(seconds=60)
+ previous_meta = {'cooldown_end': {
+ cooldown_end.isoformat(): 'ChangeInCapacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_cooldown_is_in_progress_toosoon_legacy(self):
+ now = timeutils.utcnow()
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'ChangeInCapacity : 1'}}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_cooldown_is_in_progress_scaling_unfinished(self):
+ previous_meta = {'scaling_in_progress': True}
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertRaises(resource.NoActionRequired,
+ self.group._check_scaling_allowed,
+ 60)
+
+ def test_scaling_not_in_progress_legacy(self):
+ awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
+ previous_meta = {
+ 'cooldown': {
+ awhile_ago.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+
+ def test_scaling_not_in_progress(self):
+ awhile_after = timeutils.utcnow() + datetime.timedelta(seconds=60)
+ previous_meta = {
+ 'cooldown_end': {
+ awhile_after.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ timeutils.set_time_override()
+ timeutils.advance_time_seconds(100)
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+ timeutils.clear_time_override()
+
+ def test_scaling_policy_cooldown_zero(self):
+ now = timeutils.utcnow()
+ previous_meta = {
+ 'cooldown_end': {
+ now.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(60))
+
+ def test_scaling_policy_cooldown_none(self):
+ now = timeutils.utcnow()
+ previous_meta = {
+ 'cooldown_end': {
+ now.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ self.assertIsNone(self.group._check_scaling_allowed(None))
+
+ def test_metadata_is_written(self):
+ nowish = timeutils.utcnow()
+ reason = 'cool as'
+ meta_set = self.patchobject(self.group, 'metadata_set')
+ self.patchobject(timeutils, 'utcnow', return_value=nowish)
+ self.group._finished_scaling(60, reason)
+ cooldown_end = nowish + datetime.timedelta(seconds=60)
+ meta_set.assert_called_once_with(
+ {'cooldown_end': {cooldown_end.isoformat(): reason},
+ 'scaling_in_progress': False})
+
+ def test_metadata_is_written_update(self):
+ nowish = timeutils.utcnow()
+ reason = 'cool as'
+ prev_cooldown_end = nowish + datetime.timedelta(seconds=100)
+ previous_meta = {
+ 'cooldown_end': {
+ prev_cooldown_end.isoformat(): 'ChangeInCapacity : 1'
+ }
+ }
+ self.patchobject(self.group, 'metadata_get',
+ return_value=previous_meta)
+ meta_set = self.patchobject(self.group, 'metadata_set')
+ self.patchobject(timeutils, 'utcnow', return_value=nowish)
+ self.group._finished_scaling(60, reason)
+ meta_set.assert_called_once_with(
+ {'cooldown_end': {prev_cooldown_end.isoformat(): reason},
+ 'scaling_in_progress': False})
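
The relocated TestCooldownMixin above also nails down what _finished_scaling() writes, and test_scaling_not_in_progress shows the timeutils.set_time_override()/advance_time_seconds() idiom for stepping the clock past the end time without sleeping. Below is a sketch of the metadata write implied by test_metadata_is_written and test_metadata_is_written_update; the rule for keeping a pre-existing, later cooldown_end is inferred from those tests and may differ from the engine code:

    import datetime

    from oslo_utils import timeutils


    def finished_scaling_metadata(cooldown, reason, previous=None):
        """Build the metadata _finished_scaling() stores (approximate)."""
        previous = previous or {}
        end = timeutils.utcnow() + datetime.timedelta(seconds=cooldown or 0)
        end_ts = end.isoformat()
        if 'cooldown_end' in previous:
            # Keep whichever end time is later; identically formatted
            # ISO-8601 stamps sort chronologically as strings.
            end_ts = max([end_ts] + list(previous['cooldown_end']))
        return {'cooldown_end': {end_ts: reason},
                'scaling_in_progress': False}

For example, finished_scaling_metadata(60, 'cool as') with utcnow pinned reproduces the dict asserted in test_metadata_is_written.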
diff --git a/heat/tests/autoscaling/test_scaling_policy.py b/heat/tests/autoscaling/test_scaling_policy.py
index dff0fe536..c87b9efed 100644
--- a/heat/tests/autoscaling/test_scaling_policy.py
+++ b/heat/tests/autoscaling/test_scaling_policy.py
@@ -11,10 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
-
import mock
-from oslo_utils import timeutils
import six
from heat.common import exception
@@ -87,14 +84,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
group = stack['WebServerGroup']
self.patchobject(group, 'adjust',
side_effect=resource.NoActionRequired())
- mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
- with mock.patch.object(up_policy,
- '_check_scaling_allowed') as mock_isa:
- self.assertRaises(resource.NoActionRequired,
- up_policy.handle_signal)
- mock_isa.assert_called_once_with()
- mock_fin_scaling.assert_called_once_with('ChangeInCapacity : 1',
- size_changed=False)
+ self.assertRaises(resource.NoActionRequired, up_policy.handle_signal)
def test_scaling_policy_adjust_size_changed(self):
t = template_format.parse(as_template)
@@ -102,13 +92,15 @@ class TestAutoScalingPolicy(common.HeatTestCase):
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
group = stack['WebServerGroup']
- self.patchobject(group, 'adjust')
- mock_fin_scaling = self.patchobject(up_policy, '_finished_scaling')
- with mock.patch.object(up_policy,
+ self.patchobject(group, 'resize')
+ self.patchobject(group, '_lb_reload')
+ mock_fin_scaling = self.patchobject(group, '_finished_scaling')
+ with mock.patch.object(group,
'_check_scaling_allowed') as mock_isa:
self.assertIsNone(up_policy.handle_signal())
- mock_isa.assert_called_once_with()
- mock_fin_scaling.assert_called_once_with('ChangeInCapacity : 1',
+ mock_isa.assert_called_once_with(60)
+ mock_fin_scaling.assert_called_once_with(60,
+ 'ChangeInCapacity : 1',
size_changed=True)
def test_scaling_policy_cooldown_toosoon(self):
@@ -118,28 +110,27 @@ class TestAutoScalingPolicy(common.HeatTestCase):
group = stack['WebServerGroup']
test = {'current': 'alarm'}
- with mock.patch.object(group, 'adjust',
- side_effect=AssertionError) as dont_call:
- with mock.patch.object(
- pol, '_check_scaling_allowed',
- side_effect=resource.NoActionRequired) as mock_isa:
- self.assertRaises(resource.NoActionRequired,
- pol.handle_signal, details=test)
- mock_isa.assert_called_once_with()
- self.assertEqual([], dont_call.call_args_list)
+ with mock.patch.object(
+ group, '_check_scaling_allowed',
+ side_effect=resource.NoActionRequired) as mock_isa:
+ self.assertRaises(resource.NoActionRequired,
+ pol.handle_signal, details=test)
+ mock_isa.assert_called_once_with(60)
def test_scaling_policy_cooldown_ok(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
+ group = stack['WebServerGroup']
test = {'current': 'alarm'}
+ self.patchobject(group, '_finished_scaling')
+ self.patchobject(group, '_lb_reload')
+ mock_resize = self.patchobject(group, 'resize')
- group = self.patchobject(pol.stack, 'resource_by_refid').return_value
- group.name = 'fluffy'
- with mock.patch.object(pol, '_check_scaling_allowed') as mock_isa:
+ with mock.patch.object(group, '_check_scaling_allowed') as mock_isa:
pol.handle_signal(details=test)
- mock_isa.assert_called_once_with()
- group.adjust.assert_called_once_with(1, 'ChangeInCapacity', None)
+ mock_isa.assert_called_once_with(60)
+ mock_resize.assert_called_once_with(1)
@mock.patch.object(aws_sp.AWSScalingPolicy, '_get_ec2_signed_url')
def test_scaling_policy_refid_signed_url(self, mock_get_ec2_url):
@@ -170,99 +161,6 @@ class TestAutoScalingPolicy(common.HeatTestCase):
self.assertEqual('http://convg_signed_url', rsrc.FnGetRefId())
-class TestCooldownMixin(common.HeatTestCase):
- def create_scaling_policy(self, t, stack, resource_name):
- rsrc = stack[resource_name]
- self.assertIsNone(rsrc.validate())
- scheduler.TaskRunner(rsrc.create)()
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- return rsrc
-
- def test_cooldown_is_in_progress_toosoon(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- now = timeutils.utcnow()
- previous_meta = {'cooldown': {
- now.isoformat(): 'ChangeInCapacity : 1'}}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- ex = self.assertRaises(resource.NoActionRequired,
- pol._check_scaling_allowed)
- self.assertIn('due to cooldown', six.text_type(ex))
-
- def test_cooldown_is_in_progress_scaling_unfinished(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- previous_meta = {'scaling_in_progress': True}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- ex = self.assertRaises(resource.NoActionRequired,
- pol._check_scaling_allowed)
- self.assertIn('due to scaling activity', six.text_type(ex))
-
- def test_cooldown_not_in_progress(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
- previous_meta = {
- 'cooldown': {
- awhile_ago.isoformat(): 'ChangeInCapacity : 1'
- },
- 'scaling_in_progress': False
- }
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_scaling_policy_cooldown_zero(self):
- t = template_format.parse(as_template)
-
- # Create the scaling policy (with Cooldown=0) and scale up one
- properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
- properties['Cooldown'] = '0'
-
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_scaling_policy_cooldown_none(self):
- t = template_format.parse(as_template)
-
- # Create the scaling policy no Cooldown property, should behave the
- # same as when Cooldown==0
- properties = t['Resources']['WebServerScaleUpPolicy']['Properties']
- del properties['Cooldown']
-
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
- self.patchobject(pol, 'metadata_get', return_value=previous_meta)
- self.assertIsNone(pol._check_scaling_allowed())
-
- def test_metadata_is_written(self):
- t = template_format.parse(as_template)
- stack = utils.parse_stack(t, params=as_params)
- pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
-
- nowish = timeutils.utcnow()
- reason = 'cool as'
- meta_set = self.patchobject(pol, 'metadata_set')
- self.patchobject(timeutils, 'utcnow', return_value=nowish)
- pol._finished_scaling(reason)
- meta_set.assert_called_once_with(
- {'cooldown': {nowish.isoformat(): reason},
- 'scaling_in_progress': False})
-
-
class ScalingPolicyAttrTest(common.HeatTestCase):
def setUp(self):
super(ScalingPolicyAttrTest, self).setUp()
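
Taken together, the mocks in this file describe where the orchestration now happens: handle_signal() resolves the group, and the group performs the cooldown gate, the resize and the bookkeeping. A rough sketch of that group-side flow, with method names lifted from the mocks and the capacity arithmetic deliberately elided; this is not the engine implementation:

    def adjust_sketch(group, adjustment, adjustment_type, cooldown):
        """Approximate group-side flow implied by the mocked calls."""
        # Raises NoActionRequired when within cooldown or mid-scaling,
        # which handle_signal() lets propagate to the caller.
        group._check_scaling_allowed(cooldown)
        size_changed = False
        try:
            group.resize(adjustment)   # real code computes new capacity
            group._lb_reload()
            size_changed = True
        finally:
            group._finished_scaling(
                cooldown,
                '%s : %s' % (adjustment_type, adjustment),
                size_changed=size_changed)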
diff --git a/heat/tests/aws/test_eip.py b/heat/tests/aws/test_eip.py
index abf118615..46253e476 100644
--- a/heat/tests/aws/test_eip.py
+++ b/heat/tests/aws/test_eip.py
@@ -14,6 +14,7 @@
import copy
import mock
+from neutronclient.common import exceptions as q_exceptions
from neutronclient.v2_0 import client as neutronclient
import six
@@ -175,6 +176,8 @@ class EIPTest(common.HeatTestCase):
self.m.StubOutWithMock(neutronclient.Client, 'list_networks')
self.m.StubOutWithMock(self.fc.servers, 'get')
self.m.StubOutWithMock(neutronclient.Client,
+ 'list_floatingips')
+ self.m.StubOutWithMock(neutronclient.Client,
'create_floatingip')
self.m.StubOutWithMock(neutronclient.Client,
'show_floatingip')
@@ -183,7 +186,22 @@ class EIPTest(common.HeatTestCase):
self.m.StubOutWithMock(neutronclient.Client,
'delete_floatingip')
+ def mock_interface(self, port, ip):
+ class MockIface(object):
+ def __init__(self, port_id, fixed_ip):
+ self.port_id = port_id
+ self.fixed_ips = [{'ip_address': fixed_ip}]
+
+ return MockIface(port, ip)
+
+ def mock_list_floatingips(self):
+ neutronclient.Client.list_floatingips(
+ floating_ip_address='11.0.0.1').AndReturn({
+ 'floatingips': [{'id':
+ "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}]})
+
def mock_create_floatingip(self):
+ nova.NovaClientPlugin._create().AndReturn(self.fc)
neutronclient.Client.list_networks(
**{'router:external': True}).AndReturn({'networks': [{
'status': 'ACTIVE',
@@ -217,6 +235,22 @@ class EIPTest(common.HeatTestCase):
'id': 'ffff'
}})
+ def mock_update_floatingip(self,
+ fip='fc68ea2c-b60b-4b4f-bd82-94ec81110766',
+ delete_assc=False):
+ if delete_assc:
+ request_body = {
+ 'floatingip': {
+ 'port_id': None,
+ 'fixed_ip_address': None}}
+ else:
+ request_body = {
+ 'floatingip': {
+ 'port_id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ 'fixed_ip_address': '1.2.3.4'}}
+ neutronclient.Client.update_floatingip(
+ fip, request_body).AndReturn(None)
+
def mock_delete_floatingip(self):
id = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
neutronclient.Client.delete_floatingip(id).AndReturn(None)
@@ -245,23 +279,18 @@ class EIPTest(common.HeatTestCase):
rsrc.node_data())
return rsrc
- def _mock_server_get(self, server='WebServer', mock_server=None,
- multiple=False, mock_again=False):
- if not mock_again:
- nova.NovaClientPlugin._create().AndReturn(self.fc)
- if multiple:
- self.fc.servers.get(server).MultipleTimes().AndReturn(
- mock_server)
- else:
- self.fc.servers.get(server).AndReturn(mock_server)
-
def test_eip(self):
mock_server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=mock_server)
- self._mock_server_get(mock_again=True)
+ self.patchobject(self.fc.servers, 'get',
+ return_value=mock_server)
self.mock_create_floatingip()
+ self.mock_update_floatingip()
+ self.mock_update_floatingip(delete_assc=True)
self.mock_delete_floatingip()
self.m.ReplayAll()
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(mock_server, 'interface_list', return_value=[iface])
t = template_format.parse(eip_template)
stack = utils.parse_stack(t)
@@ -285,13 +314,18 @@ class EIPTest(common.HeatTestCase):
self.m.VerifyAll()
def test_eip_update(self):
- server_old = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server_old)
-
- server_update = self.fc.servers.list()[1]
- self._mock_server_get(server='5678', mock_server=server_update,
- multiple=True, mock_again=True)
+ mock_server = self.fc.servers.list()[0]
+ self.patchobject(self.fc.servers, 'get',
+ return_value=mock_server)
self.mock_create_floatingip()
+ self.mock_update_floatingip()
+ self.mock_update_floatingip()
+ self.mock_update_floatingip(delete_assc=True)
+
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(mock_server, 'interface_list', return_value=[iface])
+
self.m.ReplayAll()
t = template_format.parse(eip_template)
stack = utils.parse_stack(t)
@@ -299,6 +333,13 @@ class EIPTest(common.HeatTestCase):
rsrc = self.create_eip(t, stack, 'IPAddress')
self.assertEqual('11.0.0.1', rsrc.FnGetRefId())
# update with the new InstanceId
+ server_update = self.fc.servers.list()[1]
+ self.patchobject(self.fc.servers, 'get',
+ return_value=server_update)
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(server_update, 'interface_list', return_value=[iface])
+
props = copy.deepcopy(rsrc.properties.data)
update_server_id = '5678'
props['InstanceId'] = update_server_id
@@ -317,12 +358,20 @@ class EIPTest(common.HeatTestCase):
self.m.VerifyAll()
def test_association_eip(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
-
+ mock_server = self.fc.servers.list()[0]
+ self.patchobject(self.fc.servers, 'get',
+ return_value=mock_server)
self.mock_create_floatingip()
- self.mock_delete_floatingip()
self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
+ self.mock_update_floatingip()
+ self.mock_list_floatingips()
+ self.mock_list_floatingips()
+ self.mock_update_floatingip(delete_assc=True)
+ self.mock_delete_floatingip()
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(mock_server, 'interface_list', return_value=[iface])
+
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -425,6 +474,8 @@ class AllocTest(common.HeatTestCase):
self.m.StubOutWithMock(neutronclient.Client,
'update_floatingip')
self.m.StubOutWithMock(neutronclient.Client,
+ 'list_floatingips')
+ self.m.StubOutWithMock(neutronclient.Client,
'delete_floatingip')
self.m.StubOutWithMock(neutronclient.Client,
'add_gateway_router')
@@ -435,6 +486,14 @@ class AllocTest(common.HeatTestCase):
self.m.StubOutWithMock(neutronclient.Client,
'remove_gateway_router')
+ def mock_interface(self, port, ip):
+ class MockIface(object):
+ def __init__(self, port_id, fixed_ip):
+ self.port_id = port_id
+ self.fixed_ips = [{'ip_address': fixed_ip}]
+
+ return MockIface(port, ip)
+
def _setup_test_stack_validate(self, stack_name):
t = template_format.parse(ipassoc_template_validate)
template = tmpl.Template(t)
@@ -469,15 +528,18 @@ class AllocTest(common.HeatTestCase):
"id": "22c26451-cf27-4d48-9031-51f5e397b84e"
}})
- def _mock_server_get(self, server='WebServer', mock_server=None,
- multiple=False, mock_again=False):
- if not mock_again:
- nova.NovaClientPlugin._create().AndReturn(self.fc)
- if multiple:
- self.fc.servers.get(server).MultipleTimes().AndReturn(
- mock_server)
- else:
- self.fc.servers.get(server).AndReturn(mock_server)
+ def _mock_server(self, mock_interface=False, mock_server=None):
+ self.patchobject(nova.NovaClientPlugin, '_create',
+ return_value=self.fc)
+ if not mock_server:
+ mock_server = self.fc.servers.list()[0]
+ self.patchobject(self.fc.servers, 'get',
+ return_value=mock_server)
+ if mock_interface:
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(mock_server,
+ 'interface_list', return_value=[iface])
def create_eip(self, t, stack, resource_name):
rsrc = eip.ElasticIp(resource_name,
@@ -502,11 +564,6 @@ class AllocTest(common.HeatTestCase):
rsrc.node_data())
return rsrc
- def mock_update_floatingip(self, port='the_nic'):
- neutronclient.Client.update_floatingip(
- 'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
- {'floatingip': {'port_id': port}}).AndReturn(None)
-
def mock_create_gateway_attachment(self):
neutronclient.Client.add_gateway_router(
'bbbb', {'network_id': 'eeee'}).AndReturn(None)
@@ -532,6 +589,29 @@ class AllocTest(common.HeatTestCase):
"floating_ip_address": "11.0.0.1"
}})
+ def mock_update_floatingip(self,
+ fip='fc68ea2c-b60b-4b4f-bd82-94ec81110766',
+ port_id='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ex=None,
+ with_address=True,
+ delete_assc=False):
+ if delete_assc:
+ request_body = {
+ 'floatingip': {'port_id': None}}
+ if with_address:
+ request_body['floatingip']['fixed_ip_address'] = None
+ else:
+ request_body = {
+ 'floatingip': {'port_id': port_id}}
+ if with_address:
+ request_body['floatingip']['fixed_ip_address'] = '1.2.3.4'
+ if ex:
+ neutronclient.Client.update_floatingip(
+ fip, request_body).AndRaise(ex)
+ else:
+ neutronclient.Client.update_floatingip(
+ fip, request_body).AndReturn(None)
+
def mock_show_floatingip(self, refid):
neutronclient.Client.show_floatingip(
refid,
@@ -545,6 +625,12 @@ class AllocTest(common.HeatTestCase):
'id': 'ffff'
}})
+ def mock_list_floatingips(self, ip_addr='11.0.0.1'):
+ neutronclient.Client.list_floatingips(
+ floating_ip_address=ip_addr).AndReturn({
+ 'floatingips': [{'id':
+ "fc68ea2c-b60b-4b4f-bd82-94ec81110766"}]})
+
def mock_delete_floatingip(self):
id = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
neutronclient.Client.delete_floatingip(id).AndReturn(None)
@@ -620,9 +706,10 @@ class AllocTest(common.HeatTestCase):
self.mock_list_ports()
self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
- self.mock_update_floatingip()
+ self.mock_update_floatingip(port_id='the_nic',
+ with_address=False)
- self.mock_update_floatingip(port=None)
+ self.mock_update_floatingip(delete_assc=True, with_address=False)
self.mock_delete_floatingip()
self.m.ReplayAll()
@@ -639,10 +726,7 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_association_allocationid_with_instance(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(server='1fafbe59-2332-4f5f-bfa4-517b4d6c1b65',
- mock_server=server,
- multiple=True)
+ self._mock_server()
self.mock_show_network()
self.mock_create_floatingip()
@@ -650,9 +734,10 @@ class AllocTest(common.HeatTestCase):
self.mock_no_router_for_vpc()
self.mock_update_floatingip(
- port='a000228d-b40b-4124-8394-a4082ae1b76c')
+ port_id='a000228d-b40b-4124-8394-a4082ae1b76c',
+ with_address=False)
- self.mock_update_floatingip(port=None)
+ self.mock_update_floatingip(delete_assc=True, with_address=False)
self.mock_delete_floatingip()
self.m.ReplayAll()
@@ -669,9 +754,9 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_validate_properties_EIP_and_AllocationId(self):
- self._mock_server_get(server='1fafbe59-2332-4f5f-bfa4-517b4d6c1b65',
- multiple=True)
+ self._mock_server()
self.m.ReplayAll()
+
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_AllocationId')
@@ -689,8 +774,7 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_validate_EIP_and_InstanceId(self):
- self._mock_server_get(server='1fafbe59-2332-4f5f-bfa4-517b4d6c1b65',
- multiple=True)
+ self._mock_server()
self.m.ReplayAll()
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_InstanceId')
@@ -703,8 +787,7 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_validate_without_NetworkInterfaceId_and_InstanceId(self):
- self._mock_server_get(server='1fafbe59-2332-4f5f-bfa4-517b4d6c1b65',
- multiple=True)
+ self._mock_server()
self.m.ReplayAll()
template, stack = self._setup_test_stack_validate(
stack_name='validate_EIP_InstanceId')
@@ -727,15 +810,12 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_delete_association_successful_if_create_failed(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
- self.m.StubOutWithMock(self.fc.servers, 'add_floating_ip')
- self.fc.servers.add_floating_ip(server, '11.0.0.1').AndRaise(
- fakes_nova.fake_exception(400))
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
+ self.mock_list_floatingips()
+ self.mock_update_floatingip(ex=q_exceptions.NotFound('Not Found'))
self.m.ReplayAll()
-
t = template_format.parse(eip_template_ipassoc)
stack = utils.parse_stack(t)
@@ -755,16 +835,12 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_update_association_with_InstanceId(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
-
- server_update = self.fc.servers.list()[1]
- self._mock_server_get(server='5678',
- mock_server=server_update,
- multiple=True,
- mock_again=True)
-
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
+ self.mock_list_floatingips()
+ self.mock_update_floatingip()
+ self.mock_list_floatingips()
+ self.mock_update_floatingip()
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -772,7 +848,8 @@ class AllocTest(common.HeatTestCase):
self.create_eip(t, stack, 'IPAddress')
ass = self.create_association(t, stack, 'IPAssoc')
self.assertEqual('11.0.0.1', ass.properties['EIP'])
-
+ server_update = self.fc.servers.list()[1]
+ self._mock_server(mock_interface=True, mock_server=server_update)
# update with the new InstanceId
props = copy.deepcopy(ass.properties.data)
update_server_id = '5678'
@@ -786,9 +863,14 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_update_association_with_EIP(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
+ self.mock_list_floatingips()
+ self.mock_update_floatingip()
+ self.mock_list_floatingips()
+ self.mock_update_floatingip(delete_assc=True)
+ self.mock_list_floatingips(ip_addr='11.0.0.2')
+ self.mock_update_floatingip()
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -809,17 +891,22 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_update_association_with_AllocationId_or_EIP(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
-
self.mock_list_instance_ports('WebServer')
self.mock_show_network()
self.mock_no_router_for_vpc()
- self.mock_update_floatingip(
- port='a000228d-b40b-4124-8394-a4082ae1b76c')
+ self.mock_list_floatingips()
+ self.mock_update_floatingip()
- self.mock_update_floatingip(port=None)
+ self.mock_list_floatingips()
+ self.mock_update_floatingip(delete_assc=True)
+ self.mock_update_floatingip(
+ port_id='a000228d-b40b-4124-8394-a4082ae1b76c',
+ with_address=False)
+ self.mock_list_floatingips(ip_addr='11.0.0.2')
+ self.mock_update_floatingip(delete_assc=True, with_address=False)
+ self.mock_update_floatingip()
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -854,17 +941,11 @@ class AllocTest(common.HeatTestCase):
self.m.VerifyAll()
def test_update_association_needs_update_InstanceId(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
+ self.mock_list_floatingips()
self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
-
- server_update = self.fc.servers.list()[1]
- self._mock_server_get(server='5678',
- mock_server=server_update,
- multiple=True,
- mock_again=True)
-
+ self.mock_update_floatingip()
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -875,6 +956,8 @@ class AllocTest(common.HeatTestCase):
after_props = {'InstanceId': {'Ref': 'WebServer2'},
'EIP': '11.0.0.1'}
before = self.create_association(t, stack, 'IPAssoc')
+ update_server = self.fc.servers.list()[1]
+ self._mock_server(mock_interface=False, mock_server=update_server)
after = rsrc_defn.ResourceDefinition(before.name, before.type(),
after_props)
self.assertTrue(resource.UpdateReplace,
@@ -882,17 +965,11 @@ class AllocTest(common.HeatTestCase):
before_props, None))
def test_update_association_needs_update_InstanceId_EIP(self):
- server = self.fc.servers.list()[0]
- self._mock_server_get(mock_server=server, multiple=True)
+ self._mock_server(mock_interface=True)
self.mock_create_floatingip()
+ self.mock_list_floatingips()
self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
-
- server_update = self.fc.servers.list()[1]
- self._mock_server_get(server='5678',
- mock_server=server_update,
- multiple=True,
- mock_again=True)
-
+ self.mock_update_floatingip()
self.m.ReplayAll()
t = template_format.parse(eip_template_ipassoc)
@@ -901,6 +978,8 @@ class AllocTest(common.HeatTestCase):
after_props = {'InstanceId': '5678',
'EIP': '11.0.0.2'}
before = self.create_association(t, stack, 'IPAssoc')
+ update_server = self.fc.servers.list()[1]
+ self._mock_server(mock_interface=False, mock_server=update_server)
after = rsrc_defn.ResourceDefinition(before.name, before.type(),
after_props)
updater = scheduler.TaskRunner(before.update, after)
@@ -911,21 +990,19 @@ class AllocTest(common.HeatTestCase):
self.mock_list_ports()
self.mock_show_network()
self.mock_no_router_for_vpc()
- self.mock_update_floatingip()
+ self.mock_update_floatingip(port_id='the_nic', with_address=False)
self.mock_list_ports(id='a000228d-b40b-4124-8394-a4082ae1b76b')
self.mock_show_network()
self.mock_no_router_for_vpc()
self.mock_update_floatingip(
- port='a000228d-b40b-4124-8394-a4082ae1b76b')
+ port_id='a000228d-b40b-4124-8394-a4082ae1b76b', with_address=False)
- update_server = self.fc.servers.list()[0]
- self._mock_server_get(server='5678', mock_server=update_server)
self.mock_list_instance_ports('5678')
self.mock_show_network()
self.mock_no_router_for_vpc()
self.mock_update_floatingip(
- port='a000228d-b40b-4124-8394-a4082ae1b76c')
+ port_id='a000228d-b40b-4124-8394-a4082ae1b76c', with_address=False)
self.m.ReplayAll()
@@ -946,6 +1023,9 @@ class AllocTest(common.HeatTestCase):
self.assertEqual((ass.UPDATE, ass.COMPLETE), ass.state)
# update with the InstanceId
+ update_server = self.fc.servers.list()[1]
+ self._mock_server(mock_server=update_server)
+
props = copy.deepcopy(ass.properties.data)
instance_id = '5678'
props.pop('NetworkInterfaceId')
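
The EIP tests now stub a pure-Neutron association path: the old nova add_floating_ip call is gone, and the mocks instead expect list_floatingips to resolve the address to an ID, followed by update_floatingip pointing it at a port. A sketch of the sequence the mocks encode, assuming a neutronclient.v2_0 Client instance and a server whose interface_list() behaves like the MockIface helper:

    def associate_eip(neutron, server, floating_ip_address):
        """Associate an EIP via Neutron only (approximate sequence)."""
        fips = neutron.list_floatingips(
            floating_ip_address=floating_ip_address)['floatingips']
        iface = server.interface_list()[0]
        neutron.update_floatingip(
            fips[0]['id'],
            {'floatingip': {
                'port_id': iface.port_id,
                'fixed_ip_address': iface.fixed_ips[0]['ip_address']}})


    def disassociate_eip(neutron, fip_id):
        """Clearing both fields mirrors update_floatingip(delete_assc=True)."""
        neutron.update_floatingip(
            fip_id, {'floatingip': {'port_id': None,
                                    'fixed_ip_address': None}})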
diff --git a/heat/tests/aws/test_volume.py b/heat/tests/aws/test_volume.py
index 2c62c86c2..9a04bb9f1 100644
--- a/heat/tests/aws/test_volume.py
+++ b/heat/tests/aws/test_volume.py
@@ -168,6 +168,7 @@ class VolumeTest(vt_base.BaseVolumeTest):
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
+ stack._update_all_resource_data(True, False)
rsrc = stack['DataVolume']
self.assertIsNone(rsrc.validate())
@@ -740,7 +741,7 @@ class VolumeTest(vt_base.BaseVolumeTest):
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
- def test_vaildate_deletion_policy(self):
+ def test_validate_deletion_policy(self):
cfg.CONF.set_override('backups_enabled', False, group='volumes')
stack_name = 'test_volume_validate_deletion_policy'
self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
diff --git a/heat/tests/clients/test_clients.py b/heat/tests/clients/test_clients.py
index 8ee875983..0c57c99ad 100644
--- a/heat/tests/clients/test_clients.py
+++ b/heat/tests/clients/test_clients.py
@@ -100,26 +100,6 @@ class ClientsTest(common.HeatTestCase):
obj = self._client_cfn_url()
self.assertEqual("http://0.0.0.0:8000/v1/", obj.get_heat_cfn_url())
- def test_clients_get_watch_server_url(self):
- obj = self._client_cfn_url()
- self.assertEqual("http://0.0.0.0:8003/v1/",
- obj.get_watch_server_url())
-
- def test_clients_get_watch_server_url_ipv6(self):
- obj = self._client_cfn_url(use_ipv6=True)
- self.assertEqual("http://[::1]:8003/v1/",
- obj.get_watch_server_url())
-
- def test_clients_get_watch_server_url_use_uwsgi_ipv6(self):
- obj = self._client_cfn_url(use_uwsgi=True, use_ipv6=True)
- self.assertEqual("http://[::1]/heat-api-cloudwatch/v1/",
- obj.get_watch_server_url())
-
- def test_clients_get_watch_server_url_use_uwsgi(self):
- obj = self._client_cfn_url(use_uwsgi=True)
- self.assertEqual("http://0.0.0.0/heat-api-cloudwatch/v1/",
- obj.get_watch_server_url())
-
def test_clients_get_heat_cfn_metadata_url(self):
obj = self._client_cfn_url()
self.assertEqual("http://0.0.0.0:8000/v1/",
diff --git a/heat/tests/clients/test_monasca_client.py b/heat/tests/clients/test_monasca_client.py
index 416e8e18d..adc4cec06 100644
--- a/heat/tests/clients/test_monasca_client.py
+++ b/heat/tests/clients/test_monasca_client.py
@@ -14,6 +14,8 @@
import mock
import six
+import monascaclient
+
from heat.common import exception as heat_exception
from heat.engine.clients.os import monasca as client_plugin
from heat.tests import common
@@ -47,6 +49,12 @@ class MonascaClientPluginTest(common.HeatTestCase):
client = plugin.client()
self.assertIsNotNone(client.metrics)
+ @mock.patch.object(monascaclient.client, '_session')
+ def test_client_uses_session(self, mock_session):
+ context = mock.MagicMock()
+ monasca_client = client_plugin.MonascaClientPlugin(context=context)
+ self.assertIsNotNone(monasca_client._create())
+
class MonascaClientPluginNotificationTest(common.HeatTestCase):
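
The new test relies on mock.patch.object swapping a module-level attribute, here the _session hook the plugin uses to build a keystone session, so _create() runs without real credentials. A self-contained illustration of the decorator form, assuming monascaclient is importable in the test environment:

    import unittest

    import mock
    import monascaclient.client


    class SessionPatchExample(unittest.TestCase):
        # The decorator injects the replacement MagicMock as an extra
        # test argument and restores the attribute when the test ends,
        # which is why the tests above grow a mock_session parameter.
        @mock.patch.object(monascaclient.client, '_session')
        def test_attribute_is_patched(self, mock_session):
            self.assertIs(monascaclient.client._session, mock_session)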
diff --git a/heat/tests/clients/test_nova_client.py b/heat/tests/clients/test_nova_client.py
index 09b70c8ca..64a99e499 100644
--- a/heat/tests/clients/test_nova_client.py
+++ b/heat/tests/clients/test_nova_client.py
@@ -363,8 +363,6 @@ class NovaClientPluginUserdataTest(NovaClientPluginTestCase):
"""Tests the build_userdata function."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
- cfg.CONF.set_override('heat_watch_server_url',
- 'http://server.test:345')
cfg.CONF.set_override('instance_connection_is_secure', False)
cfg.CONF.set_override(
'instance_connection_https_validate_certificates', False)
@@ -374,7 +372,6 @@ class NovaClientPluginUserdataTest(NovaClientPluginTestCase):
self.assertIn("Content-Type: text/part-handler;", data)
self.assertIn("Content-Type: text/x-cfninitdata;", data)
self.assertIn("Content-Type: text/x-shellscript;", data)
- self.assertIn("http://server.test:345", data)
self.assertIn("http://server.test:123", data)
self.assertIn("[Boto]", data)
@@ -382,8 +379,6 @@ class NovaClientPluginUserdataTest(NovaClientPluginTestCase):
"""Don't add a custom instance user when not requested."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
- cfg.CONF.set_override('heat_watch_server_url',
- 'http://server.test:345')
data = self.nova_plugin.build_userdata({}, instance_user=None)
self.assertNotIn('user: ', data)
self.assertNotIn('useradd', data)
@@ -393,8 +388,6 @@ class NovaClientPluginUserdataTest(NovaClientPluginTestCase):
"""Add a custom instance user."""
cfg.CONF.set_override('heat_metadata_server_url',
'http://server.test:123')
- cfg.CONF.set_override('heat_watch_server_url',
- 'http://server.test:345')
data = self.nova_plugin.build_userdata({}, instance_user='ec2-user')
self.assertIn('user: ', data)
self.assertIn('useradd', data)
diff --git a/heat/tests/clients/test_octavia_client.py b/heat/tests/clients/test_octavia_client.py
new file mode 100644
index 000000000..092f1aa88
--- /dev/null
+++ b/heat/tests/clients/test_octavia_client.py
@@ -0,0 +1,24 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.tests import common
+from heat.tests import utils
+
+
+class OctaviaClientPluginTest(common.HeatTestCase):
+
+ def test_create(self):
+ context = utils.dummy_context()
+ plugin = context.clients.client_plugin('octavia')
+ client = plugin.client()
+ self.assertIsNotNone(client.endpoint)
diff --git a/heat/tests/clients/test_sdk_client.py b/heat/tests/clients/test_sdk_client.py
index e4e3f4d65..45935b13c 100644
--- a/heat/tests/clients/test_sdk_client.py
+++ b/heat/tests/clients/test_sdk_client.py
@@ -22,7 +22,8 @@ from heat.tests import utils
class OpenStackSDKPluginTest(common.HeatTestCase):
- def test_create(self):
+ @mock.patch('openstack.connection.Connection')
+ def test_create(self, mock_connection):
context = utils.dummy_context()
plugin = context.clients.client_plugin('openstack')
client = plugin.client()
diff --git a/heat/tests/clients/test_senlin_client.py b/heat/tests/clients/test_senlin_client.py
index 3434444c4..9510050d1 100644
--- a/heat/tests/clients/test_senlin_client.py
+++ b/heat/tests/clients/test_senlin_client.py
@@ -12,15 +12,16 @@
# under the License.
import mock
+from openstack import exceptions
from heat.engine.clients.os import senlin as senlin_plugin
from heat.tests import common
from heat.tests import utils
-from senlinclient.common import exc
class SenlinClientPluginTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(SenlinClientPluginTest, self).setUp()
context = utils.dummy_context()
self.plugin = context.clients.client_plugin('senlin')
@@ -31,10 +32,10 @@ class SenlinClientPluginTest(common.HeatTestCase):
def test_is_bad_request(self):
self.assertTrue(self.plugin.is_bad_request(
- exc.sdkexc.HttpException(http_status=400)))
+ exceptions.HttpException(http_status=400)))
self.assertFalse(self.plugin.is_bad_request(Exception))
self.assertFalse(self.plugin.is_bad_request(
- exc.sdkexc.HttpException(http_status=404)))
+ exceptions.HttpException(http_status=404)))
def test_check_action_success(self):
mock_action = mock.MagicMock()
@@ -71,7 +72,8 @@ class SenlinClientPluginTest(common.HeatTestCase):
class ProfileConstraintTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(ProfileConstraintTest, self).setUp()
self.senlin_client = mock.MagicMock()
self.ctx = utils.dummy_context()
@@ -85,17 +87,18 @@ class ProfileConstraintTest(common.HeatTestCase):
self.assertTrue(self.constraint.validate("PROFILE_ID", self.ctx))
def test_validate_false(self):
- self.mock_get_profile.side_effect = exc.sdkexc.ResourceNotFound(
+ self.mock_get_profile.side_effect = exceptions.ResourceNotFound(
'PROFILE_ID')
self.assertFalse(self.constraint.validate("PROFILE_ID", self.ctx))
- self.mock_get_profile.side_effect = exc.sdkexc.HttpException(
+ self.mock_get_profile.side_effect = exceptions.HttpException(
'PROFILE_ID')
self.assertFalse(self.constraint.validate("PROFILE_ID", self.ctx))
class ClusterConstraintTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(ClusterConstraintTest, self).setUp()
self.senlin_client = mock.MagicMock()
self.ctx = utils.dummy_context()
@@ -109,17 +112,18 @@ class ClusterConstraintTest(common.HeatTestCase):
self.assertTrue(self.constraint.validate("CLUSTER_ID", self.ctx))
def test_validate_false(self):
- self.mock_get_cluster.side_effect = exc.sdkexc.ResourceNotFound(
+ self.mock_get_cluster.side_effect = exceptions.ResourceNotFound(
'CLUSTER_ID')
self.assertFalse(self.constraint.validate("CLUSTER_ID", self.ctx))
- self.mock_get_cluster.side_effect = exc.sdkexc.HttpException(
+ self.mock_get_cluster.side_effect = exceptions.HttpException(
'CLUSTER_ID')
self.assertFalse(self.constraint.validate("CLUSTER_ID", self.ctx))
class PolicyConstraintTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(PolicyConstraintTest, self).setUp()
self.senlin_client = mock.MagicMock()
self.ctx = utils.dummy_context()
@@ -133,17 +137,18 @@ class PolicyConstraintTest(common.HeatTestCase):
self.assertTrue(self.constraint.validate("POLICY_ID", self.ctx))
def test_validate_false(self):
- self.mock_get_policy.side_effect = exc.sdkexc.ResourceNotFound(
+ self.mock_get_policy.side_effect = exceptions.ResourceNotFound(
'POLICY_ID')
self.assertFalse(self.constraint.validate("POLICY_ID", self.ctx))
- self.mock_get_policy.side_effect = exc.sdkexc.HttpException(
+ self.mock_get_policy.side_effect = exceptions.HttpException(
'POLICY_ID')
self.assertFalse(self.constraint.validate("POLICY_ID", self.ctx))
class ProfileTypeConstraintTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(ProfileTypeConstraintTest, self).setUp()
self.senlin_client = mock.MagicMock()
self.ctx = utils.dummy_context()
@@ -168,7 +173,8 @@ class ProfileTypeConstraintTest(common.HeatTestCase):
class PolicyTypeConstraintTest(common.HeatTestCase):
- def setUp(self):
+ @mock.patch('openstack.connection.Connection')
+ def setUp(self, mock_connection):
super(PolicyTypeConstraintTest, self).setUp()
self.senlin_client = mock.MagicMock()
self.ctx = utils.dummy_context()
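
With senlinclient now built on openstacksdk, the plugin's error checks match openstack.exceptions types directly instead of the old exc.sdkexc aliases. A sketch of the is_bad_request() predicate these tests exercise; the http_status attribute name mirrors the keyword the tests construct the exception with and is an assumption, since openstacksdk has renamed it across releases:

    from openstack import exceptions


    def is_bad_request(ex):
        # getattr guards against SDK versions that expose the code
        # under a different attribute name (e.g. status_code).
        return (isinstance(ex, exceptions.HttpException)
                and getattr(ex, 'http_status', None) == 400)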
diff --git a/heat/tests/clients/test_swift_client.py b/heat/tests/clients/test_swift_client.py
index 3ceaadaa4..4b6d1060f 100644
--- a/heat/tests/clients/test_swift_client.py
+++ b/heat/tests/clients/test_swift_client.py
@@ -76,7 +76,7 @@ class SwiftUtilsTest(SwiftClientPluginTestCase):
url = self.swift_plugin.get_temp_url(container_name, obj_name)
self.assertFalse(self.swift_client.post_account.called)
regexp = ("http://fake-host.com:8080/v1/AUTH_demo/%s"
- "/%s\?temp_url_sig=[0-9a-f]{40}&"
+ r"/%s\?temp_url_sig=[0-9a-f]{40}&"
"temp_url_expires=[0-9]{10}" %
(container_name, obj_name))
self.assertThat(url, matchers.MatchesRegex(regexp))
@@ -119,7 +119,7 @@ class SwiftUtilsTest(SwiftClientPluginTestCase):
self.assertTrue(self.swift_client.put_container.called)
self.assertTrue(self.swift_client.put_object.called)
regexp = ("http://fake-host.com:8080/v1/AUTH_demo/%s"
- "/%s\?temp_url_sig=[0-9a-f]{40}&"
+ r"/%s\?temp_url_sig=[0-9a-f]{40}&"
"temp_url_expires=[0-9]{10}" %
(container_name, obj_name))
self.assertThat(url, matchers.MatchesRegex(regexp))
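
The swift changes are purely escape hygiene: Python leaves an unrecognized escape such as \? in the string unchanged, so the old patterns matched correctly, but CPython 3.6 began emitting DeprecationWarning for them, and the raw-string prefix makes the regex explicit. The two spellings are byte-for-byte identical:

    import re

    old = "/%s\?temp_url_sig=[0-9a-f]{40}"   # DeprecationWarning on 3.6+
    new = r"/%s\?temp_url_sig=[0-9a-f]{40}"  # same pattern, no warning
    assert old == new
    assert re.search(new % "obj", "/obj?temp_url_sig=" + 40 * "a")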
diff --git a/heat/tests/clients/test_zun_client.py b/heat/tests/clients/test_zun_client.py
index 28b7124f3..2bb636554 100644
--- a/heat/tests/clients/test_zun_client.py
+++ b/heat/tests/clients/test_zun_client.py
@@ -22,3 +22,5 @@ class ZunClientPluginTest(common.HeatTestCase):
client = plugin.client()
self.assertEqual('http://server.test:5000/v3',
client.containers.api.session.auth.endpoint)
+ self.assertEqual('1.12',
+ client.api_version.get_string())
diff --git a/heat/tests/constraints/test_heat_constraints.py b/heat/tests/constraints/test_heat_constraints.py
deleted file mode 100644
index 22ebf8554..000000000
--- a/heat/tests/constraints/test_heat_constraints.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from heat.engine.constraint import heat_constraints as hc
-from heat.tests import common
-
-
-class ResourceTypeConstraintTest(common.HeatTestCase):
-
- def setUp(self):
- super(ResourceTypeConstraintTest, self).setUp()
- self.constraint = hc.ResourceTypeConstraint()
-
- self.mock_template = mock.MagicMock()
- self.mock_env = mock.MagicMock()
- self.mock_template.env = self.mock_env
-
- def test_validate(self):
- # Setup
- value = ['OS::Heat::None']
-
- # Test
- result = self.constraint.validate(value, None, self.mock_template)
-
- # Verify
- self.assertTrue(result)
- self.mock_env.get_class.assert_called_once_with(value[0])
-
- def test_validate_failure(self):
- # Setup
- value = ['OS::Heat::None']
- self.mock_env.get_class.side_effect = Exception()
-
- # Test
- result = self.constraint.validate(value, None, self.mock_template)
-
- # Verify
- self.assertFalse(result)
- self.assertIn('OS::Heat::None', self.constraint._error_message)
- self.mock_env.get_class.assert_called_once_with(value[0])
-
- def test_validate_multiple_failures(self):
- # Setup
- value = ['OS::Heat::None', 'OS::Heat::RandomString']
- self.mock_env.get_class.side_effect = [Exception(), Exception()]
-
- # Test
- result = self.constraint.validate(value, None, self.mock_template)
-
- # Verify
- self.assertFalse(result)
- self.assertIn('OS::Heat::None,OS::Heat::RandomString',
- self.constraint._error_message)
- self.mock_env.get_class.assert_has_calls([mock.call(value[0]),
- mock.call(value[1])])
-
- def test_validate_single_item(self):
- # Setup
- value = 'OS::Heat::None'
-
- # Test
- result = self.constraint.validate(value, None, self.mock_template)
-
- # Verify
- self.assertTrue(result)
- self.mock_env.get_class.assert_called_once_with(value)
-
- def test_validate_non_string(self):
- result = self.constraint.validate(dict(), None, self.mock_template)
- self.assertFalse(result)
diff --git a/heat/tests/db/test_migrations.py b/heat/tests/db/test_migrations.py
index 62670531f..69a6b1ff3 100644
--- a/heat/tests/db/test_migrations.py
+++ b/heat/tests/db/test_migrations.py
@@ -124,6 +124,7 @@ class HeatMigrationsCheckers(test_migrations.WalkVersionsMixin,
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
exceptions = [
64, # drop constraint
+ 86, # drop watch_rule/watch_data tables
]
# Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE
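
Migration 86 earns a spot in the reviewed exceptions list because it is destructive by design: it drops the watch_rule and watch_data tables left behind by the CloudWatch removal. A minimal sketch of such a drop migration in the sqlalchemy-migrate style Heat's legacy migrations use; the actual script may differ:

    import sqlalchemy


    def upgrade(migrate_engine):
        meta = sqlalchemy.MetaData(bind=migrate_engine)
        # watch_data carries a foreign key to watch_rule, so it has to
        # be dropped first.
        for name in ('watch_data', 'watch_rule'):
            sqlalchemy.Table(name, meta, autoload=True).drop()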
diff --git a/heat/tests/db/test_sqlalchemy_api.py b/heat/tests/db/test_sqlalchemy_api.py
index 98cf8ced0..53618eef6 100644
--- a/heat/tests/db/test_sqlalchemy_api.py
+++ b/heat/tests/db/test_sqlalchemy_api.py
@@ -1437,27 +1437,6 @@ def create_event(ctx, legacy_prop_data=False, **kwargs):
return db_api.event_create(ctx, values)
-def create_watch_rule(ctx, stack, **kwargs):
- values = {
- 'name': 'test_rule',
- 'rule': json.loads('{"foo": "123"}'),
- 'state': 'normal',
- 'last_evaluated': timeutils.utcnow(),
- 'stack_id': stack.id,
- }
- values.update(kwargs)
- return db_api.watch_rule_create(ctx, values)
-
-
-def create_watch_data(ctx, watch_rule, **kwargs):
- values = {
- 'data': json.loads('{"foo": "bar"}'),
- 'watch_rule_id': watch_rule.id
- }
- values.update(kwargs)
- return db_api.watch_data_create(ctx, values)
-
-
def create_service(ctx, **kwargs):
values = {
'id': '7079762f-c863-4954-ba61-9dccb68c57e2',
@@ -2913,117 +2892,6 @@ class DBAPIEventTest(common.HeatTestCase):
self.stack2.id))
-class DBAPIWatchRuleTest(common.HeatTestCase):
- def setUp(self):
- super(DBAPIWatchRuleTest, self).setUp()
- self.ctx = utils.dummy_context()
- self.template = create_raw_template(self.ctx)
- self.user_creds = create_user_creds(self.ctx)
- self.stack = create_stack(self.ctx, self.template, self.user_creds)
-
- def test_watch_rule_create_get(self):
- watch_rule = create_watch_rule(self.ctx, self.stack)
- ret_wr = db_api.watch_rule_get(self.ctx, watch_rule.id)
- self.assertIsNotNone(ret_wr)
- self.assertEqual('test_rule', ret_wr.name)
- self.assertEqual('{"foo": "123"}', json.dumps(ret_wr.rule))
- self.assertEqual('normal', ret_wr.state)
- self.assertEqual(self.stack.id, ret_wr.stack_id)
-
- def test_watch_rule_get_by_name(self):
- watch_rule = create_watch_rule(self.ctx, self.stack)
- ret_wr = db_api.watch_rule_get_by_name(self.ctx, watch_rule.name)
- self.assertIsNotNone(ret_wr)
- self.assertEqual('test_rule', ret_wr.name)
-
- def test_watch_rule_get_all(self):
- values = [
- {'name': 'rule1'},
- {'name': 'rule2'},
- {'name': 'rule3'},
- ]
- [create_watch_rule(self.ctx, self.stack, **val) for val in values]
-
- wrs = db_api.watch_rule_get_all(self.ctx)
- self.assertEqual(3, len(wrs))
-
- names = [wr.name for wr in wrs]
- [self.assertIn(val['name'], names) for val in values]
-
- def test_watch_rule_get_all_by_stack(self):
- self.stack1 = create_stack(self.ctx, self.template, self.user_creds)
-
- values = [
- {'name': 'rule1', 'stack_id': self.stack.id},
- {'name': 'rule2', 'stack_id': self.stack1.id},
- {'name': 'rule3', 'stack_id': self.stack1.id},
- ]
- [create_watch_rule(self.ctx, self.stack, **val) for val in values]
-
- wrs = db_api.watch_rule_get_all_by_stack(self.ctx, self.stack.id)
- self.assertEqual(1, len(wrs))
- wrs = db_api.watch_rule_get_all_by_stack(self.ctx, self.stack1.id)
- self.assertEqual(2, len(wrs))
-
- def test_watch_rule_update(self):
- watch_rule = create_watch_rule(self.ctx, self.stack)
- values = {
- 'name': 'test_rule_1',
- 'rule': json.loads('{"foo": "bar"}'),
- 'state': 'nodata',
- }
- db_api.watch_rule_update(self.ctx, watch_rule.id, values)
- watch_rule = db_api.watch_rule_get(self.ctx, watch_rule.id)
- self.assertEqual('test_rule_1', watch_rule.name)
- self.assertEqual('{"foo": "bar"}', json.dumps(watch_rule.rule))
- self.assertEqual('nodata', watch_rule.state)
-
- self.assertRaises(exception.NotFound, db_api.watch_rule_update,
- self.ctx, UUID2, values)
-
- def test_watch_rule_delete(self):
- watch_rule = create_watch_rule(self.ctx, self.stack)
- create_watch_data(self.ctx, watch_rule)
- db_api.watch_rule_delete(self.ctx, watch_rule.id)
- self.assertIsNone(db_api.watch_rule_get(self.ctx, watch_rule.id))
- self.assertRaises(exception.NotFound, db_api.watch_rule_delete,
- self.ctx, UUID2)
-
- # Testing associated watch data deletion
- self.assertEqual([], db_api.watch_data_get_all(self.ctx))
-
-
-class DBAPIWatchDataTest(common.HeatTestCase):
- def setUp(self):
- super(DBAPIWatchDataTest, self).setUp()
- self.ctx = utils.dummy_context()
- self.template = create_raw_template(self.ctx)
- self.user_creds = create_user_creds(self.ctx)
- self.stack = create_stack(self.ctx, self.template, self.user_creds)
- self.watch_rule = create_watch_rule(self.ctx, self.stack)
-
- def test_watch_data_create(self):
- create_watch_data(self.ctx, self.watch_rule)
- ret_data = db_api.watch_data_get_all(self.ctx)
- self.assertEqual(1, len(ret_data))
-
- self.assertEqual('{"foo": "bar"}', json.dumps(ret_data[0].data))
- self.assertEqual(self.watch_rule.id, ret_data[0].watch_rule_id)
-
- def test_watch_data_get_all(self):
- values = [
- {'data': json.loads('{"foo": "d1"}')},
- {'data': json.loads('{"foo": "d2"}')},
- {'data': json.loads('{"foo": "d3"}')}
- ]
- [create_watch_data(self.ctx, self.watch_rule, **val) for val in values]
- watch_data = db_api.watch_data_get_all(self.ctx)
- self.assertEqual(3, len(watch_data))
-
- data = [wd.data for wd in watch_data]
- [self.assertIn(val['data'], data) for val in values]
-
-
class DBAPIServiceTest(common.HeatTestCase):
def setUp(self):
super(DBAPIServiceTest, self).setUp()
diff --git a/heat/tests/engine/service/test_software_config.py b/heat/tests/engine/service/test_software_config.py
index 38c23eeb7..15079dcd4 100644
--- a/heat/tests/engine/service/test_software_config.py
+++ b/heat/tests/engine/service/test_software_config.py
@@ -677,6 +677,7 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
with mock.patch.object(self.ctx.session, 'refresh'):
f = self.engine.software_config._push_metadata_software_deployments
+ self.patchobject(f.retry, 'sleep')
self.assertRaises(
exception.ConcurrentTransaction,
f,
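
The one-line addition above is worth calling out: the retrying decorator on _push_metadata_software_deployments exposes its controller as a .retry attribute on the wrapped function, and stubbing that controller's sleep lets the ConcurrentTransaction retries run instantly instead of waiting out real back-off delays. The same trick in isolation, shown with tenacity (whose decorator attaches .retry this way); the function here is a local example, not Heat code:

    import mock
    import tenacity


    @tenacity.retry(stop=tenacity.stop_after_attempt(3),
                    wait=tenacity.wait_fixed(30))
    def flaky():
        raise RuntimeError('still failing')


    # Replacing the Retrying controller's sleep turns a minute of
    # back-off into nothing while keeping the retry logic intact.
    with mock.patch.object(flaky.retry, 'sleep'):
        try:
            flaky()
        except tenacity.RetryError:
            pass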
diff --git a/heat/tests/engine/service/test_stack_watch.py b/heat/tests/engine/service/test_stack_watch.py
deleted file mode 100644
index 5e995d27e..000000000
--- a/heat/tests/engine/service/test_stack_watch.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_messaging.rpc import dispatcher
-
-from heat.common import exception
-from heat.engine import service
-from heat.engine import service_stack_watch
-from heat.engine import stack
-from heat.engine import watchrule
-from heat.objects import stack as stack_object
-from heat.objects import watch_data as watch_data_object
-from heat.objects import watch_rule as watch_rule_object
-from heat.rpc import api as rpc_api
-from heat.tests import common
-from heat.tests.engine import tools
-from heat.tests import utils
-
-
-class StackWatchTest(common.HeatTestCase):
-
- def setUp(self):
- super(StackWatchTest, self).setUp()
-
- self.ctx = utils.dummy_context(tenant_id='stack_watch_test_tenant')
- self.eng = service.EngineService('a-host', 'a-topic')
- # self.eng.engine_id = 'engine-fake-uuid'
-
- def _create_periodic_tasks(self):
- self.eng.create_periodic_tasks()
- self.eng.manage_thread_grp.wait()
-
- @mock.patch.object(service_stack_watch.StackWatch, 'start_watch_task')
- @mock.patch.object(stack_object.Stack, 'get_all')
- @mock.patch.object(service.service.Service, 'start')
- def test_start_watches_all_stacks(self, mock_super_start, mock_get_all,
- start_watch_task):
- s1 = mock.Mock(id=1)
- s2 = mock.Mock(id=2)
- mock_get_all.return_value = [s1, s2]
- start_watch_task.return_value = None
-
- self.eng.thread_group_mgr = None
- self._create_periodic_tasks()
-
- mock_get_all.assert_called_once_with(mock.ANY,
- show_hidden=True)
- calls = start_watch_task.call_args_list
- self.assertEqual(2, start_watch_task.call_count)
- self.assertIn(mock.call(1, mock.ANY), calls)
- self.assertIn(mock.call(2, mock.ANY), calls)
-
- @tools.stack_context('service_show_watch_test_stack', False)
- def test_show_watch(self):
- # Insert two dummy watch rules into the DB
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
- self.wr = []
- self.wr.append(watchrule.WatchRule(context=self.ctx,
- watch_name='show_watch_1',
- rule=rule,
- watch_data=[],
- stack_id=self.stack.id,
- state='NORMAL'))
- self.wr[0].store()
-
- self.wr.append(watchrule.WatchRule(context=self.ctx,
- watch_name='show_watch_2',
- rule=rule,
- watch_data=[],
- stack_id=self.stack.id,
- state='NORMAL'))
- self.wr[1].store()
-
- # watch_name=None should return all watches
- result = self.eng.show_watch(self.ctx, watch_name=None)
- result_names = [r.get('name') for r in result]
- self.assertIn('show_watch_1', result_names)
- self.assertIn('show_watch_2', result_names)
-
- result = self.eng.show_watch(self.ctx, watch_name="show_watch_1")
- self.assertEqual(1, len(result))
- self.assertIn('name', result[0])
- self.assertEqual('show_watch_1', result[0]['name'])
-
- result = self.eng.show_watch(self.ctx, watch_name="show_watch_2")
- self.assertEqual(1, len(result))
- self.assertIn('name', result[0])
- self.assertEqual('show_watch_2', result[0]['name'])
-
- ex = self.assertRaises(dispatcher.ExpectedException,
- self.eng.show_watch,
- self.ctx, watch_name="nonexistent")
- self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
-
- # Check the response has all keys defined in the engine API
- for key in rpc_api.WATCH_KEYS:
- self.assertIn(key, result[0])
-
- @tools.stack_context('service_show_watch_metric_test_stack', False)
- def test_show_watch_metric(self):
- # Insert dummy watch rule into the DB
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
- self.wr = watchrule.WatchRule(context=self.ctx,
- watch_name='show_watch_metric_1',
- rule=rule,
- watch_data=[],
- stack_id=self.stack.id,
- state='NORMAL')
- self.wr.store()
-
- # And add a metric datapoint
- watch = watch_rule_object.WatchRule.get_by_name(self.ctx,
- 'show_watch_metric_1')
- self.assertIsNotNone(watch)
- values = {'watch_rule_id': watch.id,
- 'data': {u'Namespace': u'system/linux',
- u'ServiceFailure': {
- u'Units': u'Counter', u'Value': 1}}}
- watch_data_object.WatchData.create(self.ctx, values)
-
- # Check there is one result returned
- result = self.eng.show_watch_metric(self.ctx,
- metric_namespace=None,
- metric_name=None)
- self.assertEqual(1, len(result))
-
- # Create another metric datapoint and check we get two
- watch_data_object.WatchData.create(self.ctx, values)
- result = self.eng.show_watch_metric(self.ctx,
- metric_namespace=None,
- metric_name=None)
- self.assertEqual(2, len(result))
-
- # Check the response has all keys defined in the engine API
- for key in rpc_api.WATCH_DATA_KEYS:
- self.assertIn(key, result[0])
-
- @tools.stack_context('service_show_watch_state_test_stack')
- @mock.patch.object(stack.Stack, 'resource_by_refid')
- def test_set_watch_state(self, mock_ref):
- self._create_periodic_tasks()
- # Insert dummy watch rule into the DB
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
- self.wr = watchrule.WatchRule(context=self.ctx,
- watch_name='OverrideAlarm',
- rule=rule,
- watch_data=[],
- stack_id=self.stack.id,
- state='NORMAL')
- self.wr.store()
-
- class DummyAction(object):
- def signal(self):
- return "dummyfoo"
-
- dummy_action = DummyAction()
- mock_ref.return_value = dummy_action
-
- # Replace the real stack threadgroup with a dummy one, so we can
- # check the function returned on ALARM is correctly scheduled
- dtg = tools.DummyThreadGroup()
- self.eng.thread_group_mgr.groups[self.stack.id] = dtg
-
- state = watchrule.WatchRule.NODATA
- result = self.eng.set_watch_state(self.ctx,
- watch_name="OverrideAlarm",
- state=state)
- self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
- self.assertEqual(
- [], self.eng.thread_group_mgr.groups[self.stack.id].threads)
-
- state = watchrule.WatchRule.NORMAL
- result = self.eng.set_watch_state(self.ctx,
- watch_name="OverrideAlarm",
- state=state)
- self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
- self.assertEqual(
- [], self.eng.thread_group_mgr.groups[self.stack.id].threads)
-
- state = watchrule.WatchRule.ALARM
- result = self.eng.set_watch_state(self.ctx,
- watch_name="OverrideAlarm",
- state=state)
- self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
- self.assertEqual(
- [dummy_action.signal],
- self.eng.thread_group_mgr.groups[self.stack.id].threads)
-
- mock_ref.assert_called_once_with('WebServerRestartPolicy')
-
- @tools.stack_context('service_show_watch_state_badstate_test_stack')
- @mock.patch.object(watchrule.WatchRule, 'set_watch_state')
- def test_set_watch_state_badstate(self, mock_set):
- mock_set.side_effect = ValueError
- # Insert dummy watch rule into the DB
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
- self.wr = watchrule.WatchRule(context=self.ctx,
- watch_name='OverrideAlarm2',
- rule=rule,
- watch_data=[],
- stack_id=self.stack.id,
- state='NORMAL')
- self.wr.store()
-
- for state in ["HGJHGJHG", "1234", "!\*(&%"]:
- self.assertRaises(ValueError,
- self.eng.set_watch_state,
- self.ctx, watch_name="OverrideAlarm2",
- state=state)
-
- calls = [mock.call("HGJHGJHG"),
- mock.call("1234"),
- mock.call("!\*(&%")]
- mock_set.assert_has_calls(calls)
-
- @mock.patch.object(watchrule.WatchRule, 'load')
- def test_set_watch_state_noexist(self, mock_load):
- state = watchrule.WatchRule.ALARM # State valid
- mock_load.side_effect = exception.EntityNotFound(entity='Watch Rule',
- name='test')
-
- ex = self.assertRaises(dispatcher.ExpectedException,
- self.eng.set_watch_state,
- self.ctx, watch_name="nonexistent",
- state=state)
- self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
- mock_load.assert_called_once_with(self.ctx, "nonexistent")
diff --git a/heat/tests/engine/test_resource_type.py b/heat/tests/engine/test_resource_type.py
index 921ed3b96..25bbc05e0 100644
--- a/heat/tests/engine/test_resource_type.py
+++ b/heat/tests/engine/test_resource_type.py
@@ -43,7 +43,7 @@ class ResourceTypeTest(common.HeatTestCase):
mock_is_service_available):
mock_is_service_available.return_value = (True, None)
resources = self.eng.list_resource_types(self.ctx, "DEPRECATED")
- self.assertEqual(set(['OS::Heat::HARestarter',
+ self.assertEqual(set(['OS::Aodh::Alarm',
'OS::Magnum::Bay',
'OS::Magnum::BayModel',
'OS::Glance::Image',
diff --git a/heat/tests/engine/test_sync_point.py b/heat/tests/engine/test_sync_point.py
index 615f70cd3..cbe03d164 100644
--- a/heat/tests/engine/test_sync_point.py
+++ b/heat/tests/engine/test_sync_point.py
@@ -74,15 +74,16 @@ class SyncPointTestCase(common.HeatTestCase):
self.assertEqual({'input_data': {u'tuple:(3, 8)': None}}, res)
 
@mock.patch('heat.engine.sync_point.update_input_data', return_value=None)
- @mock.patch('eventlet.sleep', side_effect=exception.DBError)
+ @mock.patch('time.sleep', side_effect=exception.DBError)
def sync_with_sleep(self, ctx, stack, mock_sleep_time, mock_uid):
resource = stack['C']
graph = stack.convergence_dependencies.graph()
mock_callback = mock.Mock()
+ sender = (3, True)
self.assertRaises(exception.DBError, sync_point.sync, ctx, resource.id,
stack.current_traversal, True, mock_callback,
- set(graph[(resource.id, True)]), {})
+ set(graph[(resource.id, True)]), {sender: None})
return mock_sleep_time
 
def test_sync_with_time_throttle(self):
@@ -92,4 +93,4 @@ class SyncPointTestCase(common.HeatTestCase):
convergence=True)
stack.converge_stack(stack.t, action=stack.CREATE)
mock_sleep_time = self.sync_with_sleep(ctx, stack)
- mock_sleep_time.assert_called_once_with(mock.ANY)
+ self.assertTrue(mock_sleep_time.called)
diff --git a/heat/tests/openstack/aodh/test_alarm.py b/heat/tests/openstack/aodh/test_alarm.py
index 0411c8670..23ab45d96 100644
--- a/heat/tests/openstack/aodh/test_alarm.py
+++ b/heat/tests/openstack/aodh/test_alarm.py
@@ -26,7 +26,6 @@ from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template as tmpl
-from heat.engine import watchrule
from heat.tests import common
from heat.tests import utils
@@ -372,8 +371,9 @@ class AodhAlarmTest(common.HeatTestCase):
# python 3.4.3 returns another error message
# so try to handle this by regexp
msg = ("Property error: Resources.MEMAlarmHigh.Properties.%s: "
- "int\(\) argument must be a string(, a bytes-like "
- "object)? or a number, not 'list'" % p)
+ r"int\(\) argument must be a string"
+ "(, a bytes-like object)?"
+ " or a number, not 'list'" % p)
self.assertRaisesRegex(exception.StackValidationFailed,
msg, rsrc.validate)
@@ -403,48 +403,6 @@ class AodhAlarmTest(common.HeatTestCase):
'MEMAlarmHigh', resource_defns['MEMAlarmHigh'], stack)
self.assertIsNone(rsrc.validate())
 
- def test_delete_watchrule_destroy(self):
- t = template_format.parse(alarm_template)
-
- test_stack = self.create_stack(template=json.dumps(t))
- rsrc = test_stack['MEMAlarmHigh']
-
- wr = mock.MagicMock()
- self.patchobject(watchrule.WatchRule, 'load', return_value=wr)
- wr.destroy.return_value = None
-
- self.patchobject(aodh.AodhClientPlugin, 'client',
- return_value=self.fa)
- self.patchobject(self.fa.alarm, 'delete')
- rsrc.resource_id = '12345'
-
- self.assertEqual('12345', rsrc.handle_delete())
- self.assertEqual(1, wr.destroy.call_count)
-        # check that the super method has been called and performs the deletion
- self.assertEqual(1, self.fa.alarm.delete.call_count)
-
- def test_delete_no_watchrule(self):
- t = template_format.parse(alarm_template)
-
- test_stack = self.create_stack(template=json.dumps(t))
- rsrc = test_stack['MEMAlarmHigh']
-
- wr = mock.MagicMock()
- self.patchobject(watchrule.WatchRule, 'load',
- side_effect=[exception.EntityNotFound(
- entity='Watch Rule', name='test')])
- wr.destroy.return_value = None
-
- self.patchobject(aodh.AodhClientPlugin, 'client',
- return_value=self.fa)
- self.patchobject(self.fa.alarm, 'delete')
- rsrc.resource_id = '12345'
-
- self.assertEqual('12345', rsrc.handle_delete())
- self.assertEqual(0, wr.destroy.call_count)
-        # check that the super method has been called and performs the deletion
- self.assertEqual(1, self.fa.alarm.delete.call_count)
-
def _prepare_resource(self, for_check=True):
snippet = template_format.parse(not_string_alarm_template)
self.stack = utils.parse_stack(snippet)
@@ -456,25 +414,12 @@ class AodhAlarmTest(common.HeatTestCase):
res.client().alarm.get.return_value = mock_alarm
return res
 
- @mock.patch.object(alarm.watchrule.WatchRule, 'load')
- def test_check(self, mock_load):
+ def test_check(self):
res = self._prepare_resource()
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
 
- @mock.patch.object(alarm.watchrule.WatchRule, 'load')
- def test_check_watchrule_failure(self, mock_load):
- res = self._prepare_resource()
- exc = alarm.exception.EntityNotFound(entity='Watch Rule', name='Boom')
- mock_load.side_effect = exc
-
- self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(res.check))
- self.assertEqual((res.CHECK, res.FAILED), res.state)
- self.assertIn('Boom', res.status_reason)
-
- @mock.patch.object(alarm.watchrule.WatchRule, 'load')
- def test_check_alarm_failure(self, mock_load):
+ def test_check_alarm_failure(self):
res = self._prepare_resource()
res.client().alarm.get.side_effect = Exception('Boom')
diff --git a/heat/tests/openstack/designate/test_domain.py b/heat/tests/openstack/designate/test_domain.py
index 4500c4618..ceff73da9 100644
--- a/heat/tests/openstack/designate/test_domain.py
+++ b/heat/tests/openstack/designate/test_domain.py
@@ -38,8 +38,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Designate::Domain'
-
class DesignateDomainTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/designate/test_record.py b/heat/tests/openstack/designate/test_record.py
index 0ec3b862a..747ed51e8 100644
--- a/heat/tests/openstack/designate/test_record.py
+++ b/heat/tests/openstack/designate/test_record.py
@@ -40,8 +40,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Designate::Record'
-
class DesignateRecordTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/designate/test_recordset.py b/heat/tests/openstack/designate/test_recordset.py
index 43bec33a9..7980fc245 100644
--- a/heat/tests/openstack/designate/test_recordset.py
+++ b/heat/tests/openstack/designate/test_recordset.py
@@ -39,8 +39,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Designate::Record'
-
class DesignateRecordSetTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/designate/test_zone.py b/heat/tests/openstack/designate/test_zone.py
index 6af8aeb69..8f5a0288c 100644
--- a/heat/tests/openstack/designate/test_zone.py
+++ b/heat/tests/openstack/designate/test_zone.py
@@ -38,8 +38,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Designate::Zone'
-
class DesignateZoneTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/heat/test_cloudwatch.py b/heat/tests/openstack/heat/test_cloudwatch.py
deleted file mode 100644
index d93a43e47..000000000
--- a/heat/tests/openstack/heat/test_cloudwatch.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from heat.common import exception
-from heat.common import template_format
-from heat.engine import resource
-from heat.engine import resources
-from heat.engine.resources.openstack.heat import cloud_watch
-from heat.engine import scheduler
-from heat.engine import watchrule
-from heat.tests import common
-from heat.tests import utils
-
-
-AWS_CloudWatch_Alarm = '''
-HeatTemplateFormatVersion: '2012-12-12'
-Description: Template which tests alarms
-Resources:
- test_me:
- Type: AWS::CloudWatch::Alarm
- Properties:
- MetricName: cpu_util
- Namespace: AWS/EC2
- Statistic: Average
- Period: '60'
- EvaluationPeriods: '1'
- Threshold: '50'
- ComparisonOperator: GreaterThanThreshold
-'''
-
-
-class CloudWatchAlarmTest(common.HeatTestCase):
-
- def setUp(self):
- super(CloudWatchAlarmTest, self).setUp()
-
- def clear_register_class():
- env = resources.global_env()
- env.registry._registry.pop('CWLiteAlarmForTest')
-
- self.ctx = utils.dummy_context()
- resource._register_class('CWLiteAlarmForTest',
- cloud_watch.CloudWatchAlarm)
- self.addCleanup(clear_register_class)
-
- def parse_stack(self):
- t = template_format.parse(AWS_CloudWatch_Alarm)
- env = {'resource_registry': {
- 'AWS::CloudWatch::Alarm': 'CWLiteAlarmForTest'
- }}
- self.stack = utils.parse_stack(t, params=env)
- return self.stack
-
- def test_resource_create_good(self):
- s = self.parse_stack()
- self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
-
- def test_resource_create_failed(self):
- s = self.parse_stack()
- with mock.patch.object(watchrule.WatchRule, 'store') as bad_store:
- bad_store.side_effect = KeyError('any random failure')
- task_func = scheduler.TaskRunner(s['test_me'].create)
- self.assertRaises(exception.ResourceFailure, task_func)
-
- def test_resource_delete_good(self):
- s = self.parse_stack()
- self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
- self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
-
- def test_resource_delete_notfound(self):
- # if a resource is not found, handle_delete() should not raise
- # an exception.
- s = self.parse_stack()
- self.assertIsNone(scheduler.TaskRunner(s['test_me'].create)())
- res_name = self.stack['test_me'].physical_resource_name()
- self.wr = watchrule.WatchRule.load(self.ctx,
- watch_name=res_name)
-
- with mock.patch.object(watchrule.WatchRule, 'destroy') as bad_destroy:
- watch_exc = exception.EntityNotFound(entity='Watch Rule',
- name='test')
- bad_destroy.side_effect = watch_exc
- self.assertIsNone(scheduler.TaskRunner(s['test_me'].delete)())
-
- def _get_watch_rule(self):
- stack = self.parse_stack()
- res = stack['test_me']
- res.state_set(res.CREATE, res.COMPLETE)
- return res
-
- @mock.patch.object(cloud_watch.watchrule.WatchRule, 'load')
-    def test_check(self, mock_load):
- res = self._get_watch_rule()
-
- scheduler.TaskRunner(res.check)()
- self.assertEqual((res.CHECK, res.COMPLETE), res.state)
-
- @mock.patch.object(cloud_watch.watchrule.WatchRule, 'load')
- def test_check_fail(self, mock_load):
- res = self._get_watch_rule()
- exc = cloud_watch.exception.EntityNotFound(entity='Watch Rule',
- name='Boom')
- mock_load.side_effect = exc
-
- self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(res.check))
- self.assertEqual((res.CHECK, res.FAILED), res.state)
- self.assertIn('Boom', res.status_reason)
diff --git a/heat/tests/openstack/heat/test_cw_alarm.py b/heat/tests/openstack/heat/test_cw_alarm.py
deleted file mode 100644
index 3ae4f57d6..000000000
--- a/heat/tests/openstack/heat/test_cw_alarm.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import copy
-
-from heat.common import template_format
-from heat.engine import resource
-from heat.engine.resources.openstack.heat import cloud_watch
-from heat.engine import rsrc_defn
-from heat.engine import scheduler
-from heat.engine import watchrule
-from heat.tests import common
-from heat.tests import utils
-
-
-alarm_template = '''
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "Alarm Test",
- "Parameters" : {},
- "Resources" : {
- "MEMAlarmHigh": {
- "Type": "AWS::CloudWatch::Alarm",
- "Properties": {
- "AlarmDescription": "Scale-up if MEM > 50% for 1 minute",
- "MetricName": "MemoryUtilization",
- "Namespace": "system/linux",
- "Statistic": "Average",
- "Period": "60",
- "EvaluationPeriods": "1",
- "Threshold": "50",
- "AlarmActions": [],
- "Dimensions": [],
- "ComparisonOperator": "GreaterThanThreshold"
- }
- }
- }
-}
-'''
-
-
-class CloudWatchAlarmTest(common.HeatTestCase):
-
- def create_alarm(self, t, stack, resource_name):
- resource_defns = stack.t.resource_definitions(stack)
- rsrc = cloud_watch.CloudWatchAlarm(resource_name,
- resource_defns[resource_name],
- stack)
- self.assertIsNone(rsrc.validate())
- scheduler.TaskRunner(rsrc.create)()
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- return rsrc
-
- def test_mem_alarm_high_update_no_replace(self):
- """Test case for updating the alarm with updatable properties.
-
- Make sure that we can change the updatable properties
- without replacing the Alarm rsrc.
- """
- t = template_format.parse(alarm_template)
-
- # short circuit the alarm's references
- properties = t['Resources']['MEMAlarmHigh']['Properties']
- properties['AlarmActions'] = ['a']
- properties['Dimensions'] = [{'a': 'v'}]
-
- stack = utils.parse_stack(t)
- # the watch rule needs a valid stack_id
- stack.store()
-
- self.m.ReplayAll()
- rsrc = self.create_alarm(t, stack, 'MEMAlarmHigh')
- props = copy.copy(rsrc.properties.data)
- props.update({
- 'ComparisonOperator': 'LessThanThreshold',
- 'AlarmDescription': 'fruity',
- 'EvaluationPeriods': '2',
- 'Period': '90',
- 'Statistic': 'Maximum',
- 'Threshold': '39',
- })
- snippet = rsrc_defn.ResourceDefinition(rsrc.name,
- rsrc.type(),
- props)
-
- scheduler.TaskRunner(rsrc.update, snippet)()
-
- scheduler.TaskRunner(rsrc.delete)()
- self.m.VerifyAll()
-
- def test_mem_alarm_high_update_replace(self):
- """Test case for replacing the alarm with non-updatable properties.
-
- Make sure that the Alarm resource IS replaced when non-update-able
- properties are changed.
- """
- t = template_format.parse(alarm_template)
-
- # short circuit the alarm's references
- properties = t['Resources']['MEMAlarmHigh']['Properties']
- properties['AlarmActions'] = ['a']
- properties['Dimensions'] = [{'a': 'v'}]
-
- stack = utils.parse_stack(t)
- # the watch rule needs a valid stack_id
- stack.store()
-
- self.m.ReplayAll()
- rsrc = self.create_alarm(t, stack, 'MEMAlarmHigh')
- props = copy.copy(rsrc.properties.data)
- props['MetricName'] = 'temp'
- snippet = rsrc_defn.ResourceDefinition(rsrc.name,
- rsrc.type(),
- props)
-
- updater = scheduler.TaskRunner(rsrc.update, snippet)
- self.assertRaises(resource.UpdateReplace, updater)
-
- scheduler.TaskRunner(rsrc.delete)()
- self.m.VerifyAll()
-
- def test_suspend_resume(self):
- t = template_format.parse(alarm_template)
- stack_name = "test_cw_alarm_sus_res_stack"
- stack = utils.parse_stack(t, stack_name=stack_name)
- # the watch rule needs a valid stack_id
- stack.store()
-
- self.m.ReplayAll()
- rsrc = self.create_alarm(t, stack, 'MEMAlarmHigh')
- scheduler.TaskRunner(rsrc.suspend)()
- self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
-
- self.ctx = utils.dummy_context()
-
- wr = watchrule.WatchRule.load(
- self.ctx, watch_name="%s-MEMAlarmHigh" % stack_name)
-
- self.assertEqual(watchrule.WatchRule.SUSPENDED, wr.state)
-
- scheduler.TaskRunner(rsrc.resume)()
- self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
-
- wr = watchrule.WatchRule.load(
- self.ctx, watch_name="%s-MEMAlarmHigh" % stack_name)
-
- self.assertEqual(watchrule.WatchRule.NODATA, wr.state)
-
- scheduler.TaskRunner(rsrc.delete)()
- self.m.VerifyAll()
diff --git a/heat/tests/openstack/heat/test_deployed_server.py b/heat/tests/openstack/heat/test_deployed_server.py
index 9ca0813b4..390e4f28f 100644
--- a/heat/tests/openstack/heat/test_deployed_server.py
+++ b/heat/tests/openstack/heat/test_deployed_server.py
@@ -128,9 +128,6 @@ resources:
class DeployedServersTest(common.HeatTestCase):
 
- def setUp(self):
- super(DeployedServersTest, self).setUp()
-
def _create_test_server(self, name, override_name=False):
server = self._setup_test_server(name, override_name)
scheduler.TaskRunner(server.create)()
diff --git a/heat/tests/openstack/heat/test_instance_group.py b/heat/tests/openstack/heat/test_instance_group.py
index fb98e11d8..a380d0208 100644
--- a/heat/tests/openstack/heat/test_instance_group.py
+++ b/heat/tests/openstack/heat/test_instance_group.py
@@ -134,6 +134,33 @@ class TestInstanceGroup(common.HeatTestCase):
self.instance_group.resize.assert_called_once_with(5)
 
def test_attributes(self):
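+        # InstanceList is now resolved from a stack output; the values are
+        # joined in the member-name order reported by the group inspector.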
+ get_output = mock.Mock(return_value={'z': '2.1.3.1',
+ 'x': '2.1.3.2',
+ 'c': '2.1.3.3'})
+ self.instance_group.get_output = get_output
+ inspector = self.instance_group._group_data()
+ inspector.member_names = mock.Mock(return_value=['z', 'x', 'c'])
+ res = self.instance_group._resolve_attribute('InstanceList')
+ self.assertEqual('2.1.3.1,2.1.3.2,2.1.3.3', res)
+ get_output.assert_called_once_with('InstanceList')
+
+ def test_attributes_format_fallback(self):
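+        # An output in the old list format cannot be mapped to members, so
+        # the resource should fall back to querying them via grouputils.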
+ self.instance_group.get_output = mock.Mock(return_value=['2.1.3.2',
+ '2.1.3.1',
+ '2.1.3.3'])
+ mock_members = self.patchobject(grouputils, 'get_members')
+ instances = []
+ for ip_ex in six.moves.range(1, 4):
+ inst = mock.Mock()
+ inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
+ instances.append(inst)
+ mock_members.return_value = instances
+ res = self.instance_group._resolve_attribute('InstanceList')
+ self.assertEqual('2.1.3.1,2.1.3.2,2.1.3.3', res)
+
+ def test_attributes_fallback(self):
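+        # With no output available at all, attribute resolution should fall
+        # back to grouputils.get_members().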
+ self.instance_group.get_output = mock.Mock(
+ side_effect=exception.NotFound)
mock_members = self.patchobject(grouputils, 'get_members')
instances = []
for ip_ex in six.moves.range(1, 4):
@@ -411,22 +438,21 @@ class ResizeWithFailedInstancesTest(InstanceGroupWithNestedStack):
def setUp(self):
super(ResizeWithFailedInstancesTest, self).setUp()
- self.group._nested = self.get_fake_nested_stack(4)
- self.nested = self.group.nested()
- self.group.nested = mock.Mock(return_value=self.nested)
+ nested = self.get_fake_nested_stack(4)
 
- def set_failed_instance(self, instance):
- for r in six.itervalues(self.group.nested()):
- if r.name == instance:
- r.status = "FAILED"
+ inspector = mock.Mock(spec=grouputils.GroupInspector)
+ self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
+ return_value=inspector)
+ inspector.member_names.return_value = (self.failed +
+ sorted(self.content -
+ set(self.failed)))
+ inspector.template.return_value = nested.defn._template
 
def test_resize(self):
- for inst in self.failed:
- self.set_failed_instance(inst)
self.group.resize(self.size)
tmpl = self.group.update_with_template.call_args[0][0]
- resources = tmpl.resource_definitions(self.group.nested())
- self.assertEqual(set(resources.keys()), self.content)
+ resources = tmpl.resource_definitions(None)
+ self.assertEqual(self.content, set(resources.keys()))
 
 
class TestGetBatches(common.HeatTestCase):
diff --git a/heat/tests/openstack/heat/test_random_string.py b/heat/tests/openstack/heat/test_random_string.py
index 5f35578d6..0445478cb 100644
--- a/heat/tests/openstack/heat/test_random_string.py
+++ b/heat/tests/openstack/heat/test_random_string.py
@@ -116,7 +116,7 @@ Resources:
self.assert_min('[0-9]', random_string, 1)
self.assert_min('[A-Z]', random_string, 1)
self.assert_min('[a-z]', random_string, 20)
- self.assert_min('[(),\[\]{}]', random_string, 1)
+ self.assert_min(r'[(),\[\]{}]', random_string, 1)
self.assert_min('[$_]', random_string, 2)
self.assert_min('@', random_string, 5)
self.assertEqual(secret3.FnGetRefId(), random_string)
@@ -132,7 +132,7 @@ Resources:
secret5 = stack['secret5']
random_string = secret5.FnGetAtt('value')
self.assertEqual(10, len(random_string))
- self.assert_min('[(),\[\]{}]', random_string, 1)
+ self.assert_min(r'[(),\[\]{}]', random_string, 1)
self.assert_min('[$_]', random_string, 2)
self.assert_min('@', random_string, 5)
self.assertEqual(secret5.FnGetRefId(), random_string)
diff --git a/heat/tests/openstack/heat/test_resource_chain.py b/heat/tests/openstack/heat/test_resource_chain.py
index f352ec561..282500c49 100644
--- a/heat/tests/openstack/heat/test_resource_chain.py
+++ b/heat/tests/openstack/heat/test_resource_chain.py
@@ -13,9 +13,9 @@
import copy
import mock
+import six
 
from heat.common import exception
-from heat.common import grouputils
from heat.engine import node_data
from heat.engine.resources.openstack.heat import resource_chain
from heat.engine import rsrc_defn
@@ -129,6 +129,12 @@ class ResourceChainTest(common.HeatTestCase):
chain = self._create_chain(TEMPLATE)
chain.validate_nested_stack()
 
+ def test_validate_reference_attr_with_none_ref(self):
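+        # A referenced attribute whose path contains None should be ignored
+        # during validation rather than raising.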
+ chain = self._create_chain(TEMPLATE)
+ self.patchobject(chain, 'referenced_attrs',
+ return_value=set([('config', None)]))
+ self.assertIsNone(chain.validate())
+
def test_validate_incompatible_properties(self):
# Tests a resource in the chain that does not support the properties
# specified to each resource.
@@ -212,13 +218,6 @@ class ResourceChainTest(common.HeatTestCase):
chain = resource_chain.ResourceChain('test', snip, self.stack)
return chain
 
- @mock.patch.object(grouputils, 'get_rsrc_id')
- def test_get_attribute(self, mock_get_rsrc_id):
- stack = utils.parse_stack(TEMPLATE)
- mock_get_rsrc_id.side_effect = ['0', '1']
- rsrc = stack['test-chain']
- self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
-
def test_get_attribute_convg(self):
cache_data = {'test-chain': node_data.NodeData.from_dict({
'uuid': mock.ANY,
@@ -230,3 +229,163 @@ class ResourceChainTest(common.HeatTestCase):
stack = utils.parse_stack(TEMPLATE, cache_data=cache_data)
rsrc = stack.defn['test-chain']
self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt('refs'))
+
+
+class ResourceChainAttrTest(common.HeatTestCase):
+ def test_aggregate_attribs(self):
+ """Test attribute aggregation.
+
+ Test attribute aggregation and that we mimic the nested resource's
+ attributes.
+ """
+ chain = self._create_dummy_stack()
+ expected = ['0', '1']
+ self.assertEqual(expected, chain.FnGetAtt('foo'))
+ self.assertEqual(expected, chain.FnGetAtt('Foo'))
+
+ def test_index_dotted_attribs(self):
+ """Test attribute aggregation.
+
+ Test attribute aggregation and that we mimic the nested resource's
+ attributes.
+ """
+ chain = self._create_dummy_stack()
+ self.assertEqual('0', chain.FnGetAtt('resource.0.Foo'))
+ self.assertEqual('1', chain.FnGetAtt('resource.1.Foo'))
+
+ def test_index_path_attribs(self):
+ """Test attribute aggregation.
+
+ Test attribute aggregation and that we mimic the nested resource's
+ attributes.
+ """
+ chain = self._create_dummy_stack()
+ self.assertEqual('0', chain.FnGetAtt('resource.0', 'Foo'))
+ self.assertEqual('1', chain.FnGetAtt('resource.1', 'Foo'))
+
+ def test_index_deep_path_attribs(self):
+ """Test attribute aggregation.
+
+ Test attribute aggregation and that we mimic the nested resource's
+ attributes.
+ """
+ chain = self._create_dummy_stack(expect_attrs={'0': 2, '1': 3})
+ self.assertEqual(2, chain.FnGetAtt('resource.0',
+ 'nested_dict', 'dict', 'b'))
+ self.assertEqual(3, chain.FnGetAtt('resource.1',
+ 'nested_dict', 'dict', 'b'))
+
+ def test_aggregate_deep_path_attribs(self):
+ """Test attribute aggregation.
+
+ Test attribute aggregation and that we mimic the nested resource's
+ attributes.
+ """
+ chain = self._create_dummy_stack(expect_attrs={'0': 3, '1': 3})
+ expected = [3, 3]
+ self.assertEqual(expected, chain.FnGetAtt('nested_dict', 'list', 2))
+
+ def test_aggregate_refs(self):
+ """Test resource id aggregation."""
+ chain = self._create_dummy_stack()
+ expected = ['ID-0', 'ID-1']
+ self.assertEqual(expected, chain.FnGetAtt("refs"))
+
+ def test_aggregate_refs_with_index(self):
+ """Test resource id aggregation with index."""
+ chain = self._create_dummy_stack()
+ expected = ['ID-0', 'ID-1']
+ self.assertEqual(expected[0], chain.FnGetAtt("refs", 0))
+ self.assertEqual(expected[1], chain.FnGetAtt("refs", 1))
+ self.assertIsNone(chain.FnGetAtt("refs", 2))
+
+ def test_aggregate_outputs(self):
+ """Test outputs aggregation."""
+ expected = {'0': ['foo', 'bar'], '1': ['foo', 'bar']}
+ chain = self._create_dummy_stack(expect_attrs=expected)
+ self.assertEqual(expected, chain.FnGetAtt('attributes', 'list'))
+
+ def test_aggregate_outputs_no_path(self):
+ """Test outputs aggregation with missing path."""
+ chain = self._create_dummy_stack()
+ self.assertRaises(exception.InvalidTemplateAttribute,
+ chain.FnGetAtt, 'attributes')
+
+ def test_index_refs(self):
+ """Tests getting ids of individual resources."""
+ chain = self._create_dummy_stack()
+ self.assertEqual("ID-0", chain.FnGetAtt('resource.0'))
+ self.assertEqual("ID-1", chain.FnGetAtt('resource.1'))
+ ex = self.assertRaises(exception.NotFound, chain.FnGetAtt,
+ 'resource.2')
+ self.assertIn("Member '2' not found in group resource 'test'",
+ six.text_type(ex))
+
+ def _create_dummy_stack(self, expect_count=2, expect_attrs=None):
+ self.stack = utils.parse_stack(TEMPLATE)
+ snip = self.stack.t.resource_definitions(self.stack)['test-chain']
+ chain = resource_chain.ResourceChain('test', snip, self.stack)
+ attrs = {}
+ refids = {}
+ if expect_attrs is None:
+ expect_attrs = {}
+ for index in range(expect_count):
+ res = str(index)
+ attrs[index] = expect_attrs.get(res, res)
+ refids[index] = 'ID-%s' % res
+
+ names = [str(name) for name in range(expect_count)]
+ chain._resource_names = mock.Mock(return_value=names)
+ self._stub_get_attr(chain, refids, attrs)
+ return chain
+
+ def _stub_get_attr(self, chain, refids, attrs):
+ def ref_id_fn(res_name):
+ return refids[int(res_name)]
+
+ def attr_fn(args):
+ res_name = args[0]
+ return attrs[int(res_name)]
+
+ def get_output(output_name):
+ outputs = chain._nested_output_defns(chain._resource_names(),
+ attr_fn, ref_id_fn)
+ op_defns = {od.name: od for od in outputs}
+ if output_name not in op_defns:
+ raise exception.NotFound('Specified output key %s not found.' %
+ output_name)
+ return op_defns[output_name].get_value()
+
+ orig_get_attr = chain.FnGetAtt
+
+ def get_attr(attr_name, *path):
+ if not path:
+ attr = attr_name
+ else:
+ attr = (attr_name,) + path
+ # Mock referenced_attrs() so that _nested_output_definitions()
+ # will include the output required for this attribute
+ chain.referenced_attrs = mock.Mock(return_value=[attr])
+
+ # Pass through to actual function under test
+ return orig_get_attr(attr_name, *path)
+
+ chain.FnGetAtt = mock.Mock(side_effect=get_attr)
+ chain.get_output = mock.Mock(side_effect=get_output)
+
+
+class ResourceChainAttrFallbackTest(ResourceChainAttrTest):
+ def _stub_get_attr(self, chain, refids, attrs):
+ # Raise NotFound when getting output, to force fallback to old-school
+ # grouputils functions
+ chain.get_output = mock.Mock(side_effect=exception.NotFound)
+
+ def make_fake_res(idx):
+ fr = mock.Mock()
+ fr.stack = chain.stack
+ fr.FnGetRefId.return_value = refids[idx]
+ fr.FnGetAtt.return_value = attrs[idx]
+ return fr
+
+ fake_res = {str(i): make_fake_res(i) for i in refids}
+ chain.nested = mock.Mock(return_value=fake_res)
diff --git a/heat/tests/openstack/heat/test_resource_group.py b/heat/tests/openstack/heat/test_resource_group.py
index 4090eefdf..deb59c6c9 100644
--- a/heat/tests/openstack/heat/test_resource_group.py
+++ b/heat/tests/openstack/heat/test_resource_group.py
@@ -138,7 +138,11 @@ template_server = {
class ResourceGroupTest(common.HeatTestCase):
def setUp(self):
- common.HeatTestCase.setUp(self)
+ super(ResourceGroupTest, self).setUp()
+
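+        # Group membership is now read through GroupInspector, so stub it
+        # once for every test in this class.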
+ self.inspector = mock.Mock(spec=grouputils.GroupInspector)
+ self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
+ return_value=self.inspector)
 
def test_assemble_nested(self):
"""Tests nested stack creation based on props.
@@ -170,9 +174,70 @@ class ResourceGroupTest(common.HeatTestCase):
"Foo": "Bar"
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ "2": {"get_resource": "2"},
+ }
+ }
+ }
+ }
+
+ self.assertEqual(templ, resg._assemble_nested(['0', '1', '2']).t)
+
+ def test_assemble_nested_outputs(self):
+ """Tests nested stack creation based on props.
+
+ Tests that the nested stack that implements the group is created
+ appropriately based on properties.
+ """
+ stack = utils.parse_stack(template)
+ snip = stack.t.resource_definitions(stack)['group1']
+ resg = resource_group.ResourceGroup('test', snip, stack)
+ templ = {
+ "heat_template_version": "2015-04-30",
+ "resources": {
+ "0": {
+ "type": "OverwrittenFnGetRefIdType",
+ "properties": {
+ "Foo": "Bar"
+ }
+ },
+ "1": {
+ "type": "OverwrittenFnGetRefIdType",
+ "properties": {
+ "Foo": "Bar"
+ }
+ },
+ "2": {
+ "type": "OverwrittenFnGetRefIdType",
+ "properties": {
+ "Foo": "Bar"
+ }
+ }
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ "2": {"get_resource": "2"},
+ }
+ },
+ "foo": {
+ "value": [
+ {"get_attr": ["0", "foo"]},
+ {"get_attr": ["1", "foo"]},
+ {"get_attr": ["2", "foo"]},
+ ]
+ }
}
}
+ resg.referenced_attrs = mock.Mock(return_value=["foo"])
self.assertEqual(templ, resg._assemble_nested(['0', '1', '2']).t)
 
def test_assemble_nested_include(self):
@@ -189,6 +254,13 @@ class ResourceGroupTest(common.HeatTestCase):
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ }
+ }
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
@@ -204,6 +276,7 @@ class ResourceGroupTest(common.HeatTestCase):
resg = resource_group.ResourceGroup('test', snip, stack)
expect = {
"heat_template_version": "2015-04-30",
+ "outputs": {"refs_map": {"value": {}}},
}
self.assertEqual(expect, resg._assemble_nested([]).t)
@@ -229,6 +302,13 @@ class ResourceGroupTest(common.HeatTestCase):
'role': 'webserver'
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ }
+ }
}
}
self.assertEqual(expect, resg._assemble_nested(['0']).t)
@@ -249,6 +329,14 @@ class ResourceGroupTest(common.HeatTestCase):
"foo": "baz"
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ }
+ }
}
}
resource_def = rsrc_defn.ResourceDefinition(
@@ -259,10 +347,59 @@ class ResourceGroupTest(common.HeatTestCase):
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
- resg._nested = get_fake_nested_stack(['0', '1'])
+ nested = get_fake_nested_stack(['0', '1'])
+ self.inspector.template.return_value = nested.defn._template
+ self.inspector.member_names.return_value = ['0', '1']
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
 
+ def test_assemble_nested_rolling_update_outputs(self):
+ expect = {
+ "heat_template_version": "2015-04-30",
+ "resources": {
+ "0": {
+ "type": "OverwrittenFnGetRefIdType",
+ "properties": {
+ "foo": "bar"
+ }
+ },
+ "1": {
+ "type": "OverwrittenFnGetRefIdType",
+ "properties": {
+ "foo": "baz"
+ }
+ }
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ }
+ },
+ "bar": {
+ "value": [
+ {"get_attr": ["0", "bar"]},
+ {"get_attr": ["1", "bar"]},
+ ]
+ }
+ }
+ }
+ resource_def = rsrc_defn.ResourceDefinition(
+ None,
+ "OverwrittenFnGetRefIdType",
+ {"foo": "baz"})
+
+ stack = utils.parse_stack(template)
+ snip = stack.t.resource_definitions(stack)['group1']
+ resg = resource_group.ResourceGroup('test', snip, stack)
+ nested = get_fake_nested_stack(['0', '1'])
+ self.inspector.template.return_value = nested.defn._template
+ self.inspector.member_names.return_value = ['0', '1']
+ resg.build_resource_definition = mock.Mock(return_value=resource_def)
+ resg.referenced_attrs = mock.Mock(return_value=["bar"])
+ self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
+
def test_assemble_nested_rolling_update_none(self):
expect = {
"heat_template_version": "2015-04-30",
@@ -279,6 +416,14 @@ class ResourceGroupTest(common.HeatTestCase):
"foo": "bar"
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ }
+ }
}
}
@@ -290,7 +435,9 @@ class ResourceGroupTest(common.HeatTestCase):
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
- resg._nested = get_fake_nested_stack(['0', '1'])
+ nested = get_fake_nested_stack(['0', '1'])
+ self.inspector.template.return_value = nested.defn._template
+ self.inspector.member_names.return_value = ['0', '1']
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 0).t)
@@ -310,6 +457,14 @@ class ResourceGroupTest(common.HeatTestCase):
"foo": "bar"
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ }
+ }
}
}
resource_def = rsrc_defn.ResourceDefinition(
@@ -320,9 +475,9 @@ class ResourceGroupTest(common.HeatTestCase):
stack = utils.parse_stack(template)
snip = stack.t.resource_definitions(stack)['group1']
resg = resource_group.ResourceGroup('test', snip, stack)
- resg._nested = get_fake_nested_stack(['0', '1'])
- res0 = resg._nested['0']
- res0.status = res0.FAILED
+ nested = get_fake_nested_stack(['0', '1'])
+ self.inspector.template.return_value = nested.defn._template
+ self.inspector.member_names.return_value = ['1']
resg.build_resource_definition = mock.Mock(return_value=resource_def)
self.assertEqual(expect, resg._assemble_for_rolling_update(2, 1).t)
@@ -355,6 +510,14 @@ class ResourceGroupTest(common.HeatTestCase):
"type": "OverwrittenFnGetRefIdType",
"properties": {}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ }
+ }
}
}
self.assertEqual(expected, nested_tmpl.t)
@@ -394,6 +557,15 @@ class ResourceGroupTest(common.HeatTestCase):
]
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ "1": {"get_resource": "1"},
+ "2": {"get_resource": "2"},
+ }
+ }
}
}
nested = resg._assemble_nested(['0', '1', '2']).t
@@ -420,6 +592,13 @@ class ResourceGroupTest(common.HeatTestCase):
]
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ }
+ }
}
}
nested = resg._assemble_nested(['0']).t
@@ -449,6 +628,13 @@ class ResourceGroupTest(common.HeatTestCase):
]
}
}
+ },
+ "outputs": {
+ "refs_map": {
+ "value": {
+ "0": {"get_resource": "0"},
+ }
+ }
}
}
nested = resg._assemble_nested(['0']).t
@@ -507,6 +693,14 @@ class ResourceGroupTest(common.HeatTestCase):
resgrp = resource_group.ResourceGroup('test', snip, stack)
self.assertIsNone(resgrp.validate())
 
+ def test_validate_reference_attr_with_none_ref(self):
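+        # As for ResourceChain: a referenced attribute with a None path
+        # component should not break validation.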
+ stack = utils.parse_stack(template_attr)
+ snip = stack.t.resource_definitions(stack)['group1']
+ resgrp = resource_group.ResourceGroup('test', snip, stack)
+ self.patchobject(resgrp, 'referenced_attrs',
+ return_value=set([('nested_dict', None)]))
+ self.assertIsNone(resgrp.validate())
+
def test_invalid_removal_policies_nolist(self):
"""Test that error raised for malformed removal_policies."""
tmp = copy.deepcopy(template)
@@ -564,6 +758,8 @@ class ResourceGroupTest(common.HeatTestCase):
self.assertEqual(1, resgrp.create_with_template.call_count)
 
def test_handle_create_with_batching(self):
+ self.inspector.member_names.return_value = []
+ self.inspector.size.return_value = 0
stack = utils.parse_stack(tmpl_with_default_updt_policy())
defn = stack.t.resource_definitions(stack)['group1']
props = stack.t.t['resources']['group1']['properties'].copy()
@@ -576,6 +772,8 @@ class ResourceGroupTest(common.HeatTestCase):
self.assertEqual(4, len(checkers))
 
def test_handle_create_with_batching_zero_count(self):
+ self.inspector.member_names.return_value = []
+ self.inspector.size.return_value = 0
stack = utils.parse_stack(tmpl_with_default_updt_policy())
defn = stack.t.resource_definitions(stack)['group1']
props = stack.t.t['resources']['group1']['properties'].copy()
@@ -583,9 +781,9 @@ class ResourceGroupTest(common.HeatTestCase):
update_policy = {'batch_create': {'max_batch_size': 1}}
snip = defn.freeze(properties=props, update_policy=update_policy)
resgrp = resource_group.ResourceGroup('test', snip, stack)
- self.patchobject(scheduler.TaskRunner, 'start')
- checkers = resgrp.handle_create()
- self.assertEqual(0, len(checkers))
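+        # With zero resources there are no batches to schedule; the group
+        # should still create its (empty) nested stack exactly once.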
+ resgrp.create_with_template = mock.Mock(return_value=None)
+ self.assertIsNone(resgrp.handle_create())
+ self.assertEqual(1, resgrp.create_with_template.call_count)
 
def test_run_to_completion(self):
stack = utils.parse_stack(template2)
@@ -604,7 +802,7 @@ class ResourceGroupTest(common.HeatTestCase):
resgrp._assemble_nested = mock.Mock(return_value='tmpl')
resgrp.properties.data[resgrp.COUNT] = 2
self.patchobject(scheduler.TaskRunner, 'start')
- resgrp.handle_update(snip, None, None)
+ resgrp.handle_update(snip, mock.Mock(), {})
self.assertTrue(resgrp._assemble_nested.called)
 
def test_handle_delete(self):
@@ -622,7 +820,7 @@ class ResourceGroupTest(common.HeatTestCase):
resgrp._assemble_nested = mock.Mock(return_value=None)
resgrp.properties.data[resgrp.COUNT] = 5
self.patchobject(scheduler.TaskRunner, 'start')
- resgrp.handle_update(snip, None, None)
+ resgrp.handle_update(snip, mock.Mock(), {})
self.assertTrue(resgrp._assemble_nested.called)
@@ -635,39 +833,63 @@ class ResourceGroupBlackList(common.HeatTestCase):
# 4) resource_list (refid) not in nested()
# 5) resource_list in nested() -> saved
# 6) resource_list (refid) in nested() -> saved
+ # 7) resource_list (refid) in nested(), update -> saved
+ # 8) resource_list, update -> saved
+ # 9) resource_list (refid) in nested(), grouputils fallback -> saved
+ # A) resource_list (refid) in nested(), update, grouputils -> saved
scenarios = [
('1', dict(data_in=None, rm_list=[],
nested_rsrcs=[], expected=[],
- saved=False)),
+ saved=False, fallback=False, rm_mode='append')),
('2', dict(data_in='0,1,2', rm_list=[],
nested_rsrcs=[], expected=['0', '1', '2'],
- saved=False)),
+ saved=False, fallback=False, rm_mode='append')),
('3', dict(data_in='1,3', rm_list=['6'],
nested_rsrcs=['0', '1', '3'],
expected=['1', '3'],
- saved=False)),
+ saved=False, fallback=False, rm_mode='append')),
('4', dict(data_in='0,1', rm_list=['id-7'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1'],
- saved=False)),
+ saved=False, fallback=False, rm_mode='append')),
('5', dict(data_in='0,1', rm_list=['3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
- saved=True)),
+ saved=True, fallback=False, rm_mode='append')),
('6', dict(data_in='0,1', rm_list=['id-3'],
nested_rsrcs=['0', '1', '3'],
expected=['0', '1', '3'],
- saved=True)),
+ saved=True, fallback=False, rm_mode='append')),
+ ('7', dict(data_in='0,1', rm_list=['id-3'],
+ nested_rsrcs=['0', '1', '3'],
+ expected=['3'],
+ saved=True, fallback=False, rm_mode='update')),
+ ('8', dict(data_in='1', rm_list=[],
+ nested_rsrcs=['0', '1', '2'],
+ expected=[],
+ saved=True, fallback=False, rm_mode='update')),
+ ('9', dict(data_in='0,1', rm_list=['id-3'],
+ nested_rsrcs=['0', '1', '3'],
+ expected=['0', '1', '3'],
+ saved=True, fallback=True, rm_mode='append')),
+ ('A', dict(data_in='0,1', rm_list=['id-3'],
+ nested_rsrcs=['0', '1', '3'],
+ expected=['3'],
+ saved=True, fallback=True, rm_mode='update')),
]
 
def test_blacklist(self):
stack = utils.parse_stack(template)
resg = stack['group1']
+ if self.data_in is not None:
+ resg.resource_id = 'foo'
+
# mock properties
- resg.properties = mock.MagicMock()
- resg.properties.__getitem__.return_value = [
- {'resource_list': self.rm_list}]
+ properties = mock.MagicMock()
+ p_data = {'removal_policies': [{'resource_list': self.rm_list}],
+ 'removal_policies_mode': self.rm_mode}
+ properties.get.side_effect = p_data.get
 
# mock data get/set
resg.data = mock.Mock()
@@ -675,28 +897,41 @@ class ResourceGroupBlackList(common.HeatTestCase):
resg.data_set = mock.Mock()
# mock nested access
- def stack_contains(name):
- return name in self.nested_rsrcs
-
- def by_refid(name):
- rid = name.replace('id-', '')
- if rid not in self.nested_rsrcs:
- return None
- res = mock.Mock()
- res.name = rid
- return res
-
- nested = mock.MagicMock()
- nested.__contains__.side_effect = stack_contains
- nested.__iter__.side_effect = iter(self.nested_rsrcs)
- nested.resource_by_refid.side_effect = by_refid
- resg.nested = mock.Mock(return_value=nested)
-
- blacklist = resg._name_blacklist()
- self.assertEqual(set(self.expected), blacklist)
+ mock_inspect = mock.Mock()
+ self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
+ return_value=mock_inspect)
+ mock_inspect.member_names.return_value = self.nested_rsrcs
+
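+        # The refs_map output maps member names to reference IDs; fallback
+        # scenarios raise NotFound to exercise the legacy grouputils path.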
+ if not self.fallback:
+ refs_map = {n: 'id-%s' % n for n in self.nested_rsrcs}
+ resg.get_output = mock.Mock(return_value=refs_map)
+ else:
+ resg.get_output = mock.Mock(side_effect=exception.NotFound)
+
+ def stack_contains(name):
+ return name in self.nested_rsrcs
+
+ def by_refid(name):
+ rid = name.replace('id-', '')
+ if rid not in self.nested_rsrcs:
+ return None
+ res = mock.Mock()
+ res.name = rid
+ return res
+
+ nested = mock.MagicMock()
+ nested.__contains__.side_effect = stack_contains
+ nested.__iter__.side_effect = iter(self.nested_rsrcs)
+ nested.resource_by_refid.side_effect = by_refid
+ resg.nested = mock.Mock(return_value=nested)
+
+ resg._update_name_blacklist(properties)
if self.saved:
resg.data_set.assert_called_once_with('name_blacklist',
- ','.join(blacklist))
+ ','.join(self.expected))
+ else:
+ resg.data_set.assert_not_called()
+ self.assertEqual(set(self.expected), resg._name_blacklist())
 
 
class ResourceGroupEmptyParams(common.HeatTestCase):
@@ -870,13 +1105,6 @@ class ResourceGroupAttrTest(common.HeatTestCase):
self.assertIn("Member '2' not found in group resource 'group1'.",
six.text_type(ex))
 
- @mock.patch.object(grouputils, 'get_rsrc_id')
- def test_get_attribute(self, mock_get_rsrc_id):
- stack = utils.parse_stack(template)
- mock_get_rsrc_id.side_effect = ['0', '1']
- rsrc = stack['group1']
- self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
-
def test_get_attribute_convg(self):
cache_data = {'group1': node_data.NodeData.from_dict({
'uuid': mock.ANY,
@@ -900,24 +1128,78 @@ class ResourceGroupAttrTest(common.HeatTestCase):
expect_attrs=None):
stack = utils.parse_stack(template_data)
resg = stack['group1']
- fake_res = {}
+ resg.resource_id = 'test-test'
+ attrs = {}
+ refids = {}
if expect_attrs is None:
expect_attrs = {}
- for resc in range(expect_count):
- res = str(resc)
- fake_res[res] = mock.Mock()
- fake_res[res].stack = stack
- fake_res[res].FnGetRefId.return_value = 'ID-%s' % res
- if res in expect_attrs:
- fake_res[res].FnGetAtt.return_value = expect_attrs[res]
- else:
- fake_res[res].FnGetAtt.return_value = res
- resg.nested = mock.Mock(return_value=fake_res)
+ for index in range(expect_count):
+ res = str(index)
+ attrs[index] = expect_attrs.get(res, res)
+ refids[index] = 'ID-%s' % res
names = [str(name) for name in range(expect_count)]
resg._resource_names = mock.Mock(return_value=names)
+ self._stub_get_attr(resg, refids, attrs)
return resg
 
+ def _stub_get_attr(self, resg, refids, attrs):
+ def ref_id_fn(res_name):
+ return refids[int(res_name)]
+
+ def attr_fn(args):
+ res_name = args[0]
+ return attrs[int(res_name)]
+
+ def get_output(output_name):
+ outputs = resg._nested_output_defns(resg._resource_names(),
+ attr_fn, ref_id_fn)
+ op_defns = {od.name: od for od in outputs}
+ self.assertIn(output_name, op_defns)
+ return op_defns[output_name].get_value()
+
+ orig_get_attr = resg.FnGetAtt
+
+ def get_attr(attr_name, *path):
+ if not path:
+ attr = attr_name
+ else:
+ attr = (attr_name,) + path
+ # Mock referenced_attrs() so that _nested_output_definitions()
+ # will include the output required for this attribute
+ resg.referenced_attrs = mock.Mock(return_value=[attr])
+
+ # Pass through to actual function under test
+ return orig_get_attr(attr_name, *path)
+
+ resg.FnGetAtt = mock.Mock(side_effect=get_attr)
+ resg.get_output = mock.Mock(side_effect=get_output)
+
+
+class ResourceGroupAttrFallbackTest(ResourceGroupAttrTest):
+ def _stub_get_attr(self, resg, refids, attrs):
+ # Raise NotFound when getting output, to force fallback to old-school
+ # grouputils functions
+ resg.get_output = mock.Mock(side_effect=exception.NotFound)
+
+ def make_fake_res(idx):
+ fr = mock.Mock()
+ fr.stack = resg.stack
+ fr.FnGetRefId.return_value = refids[idx]
+ fr.FnGetAtt.return_value = attrs[idx]
+ return fr
+
+ fake_res = {str(i): make_fake_res(i) for i in refids}
+ resg.nested = mock.Mock(return_value=fake_res)
+
+ @mock.patch.object(grouputils, 'get_rsrc_id')
+ def test_get_attribute(self, mock_get_rsrc_id):
+ stack = utils.parse_stack(template)
+ mock_get_rsrc_id.side_effect = ['0', '1']
+ rsrc = stack['group1']
+ rsrc.get_output = mock.Mock(side_effect=exception.NotFound)
+ self.assertEqual(['0', '1'], rsrc.FnGetAtt(rsrc.REFS))
+
 
class ReplaceTest(common.HeatTestCase):
# 1. no min_in_service
@@ -974,6 +1256,12 @@ class ReplaceTest(common.HeatTestCase):
self.group.update_with_template = mock.Mock()
self.group.check_update_complete = mock.Mock()
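+        # Existing group membership and size are now reported through
+        # GroupInspector rather than by loading the nested stack.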
+ inspector = mock.Mock(spec=grouputils.GroupInspector)
+ self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
+ return_value=inspector)
+ inspector.member_names.return_value = self.existing
+ inspector.size.return_value = len(self.existing)
+
def test_rolling_updates(self):
self.group._nested = get_fake_nested_stack(self.existing)
self.group.get_size = mock.Mock(return_value=self.count)
@@ -981,8 +1269,7 @@ class ReplaceTest(common.HeatTestCase):
return_value=set(self.black_listed))
tasks = self.group._replace(self.min_in_service, self.batch_size,
self.pause_sec)
- self.assertEqual(self.tasks,
- len(tasks))
+ self.assertEqual(self.tasks, len(tasks))
 
 
def tmpl_with_bad_updt_policy():
@@ -1095,12 +1382,15 @@ class RollingUpdatePolicyDiffTest(common.HeatTestCase):
tmpl_diff = updated_grp.update_template_diff(
updated_grp_json, current_grp_json)
self.assertTrue(tmpl_diff.update_policy_changed())
+ prop_diff = current_grp.update_template_diff_properties(
+ updated_grp.properties,
+ current_grp.properties)
 
# test application of the new update policy in handle_update
current_grp._try_rolling_update = mock.Mock()
current_grp._assemble_nested_for_size = mock.Mock()
self.patchobject(scheduler.TaskRunner, 'start')
- current_grp.handle_update(updated_grp_json, tmpl_diff, None)
+ current_grp.handle_update(updated_grp_json, tmpl_diff, prop_diff)
self.assertEqual(updated_grp_json._update_policy or {},
current_grp.update_policy.data)
@@ -1192,12 +1482,16 @@ class TestUtils(common.HeatTestCase):
]
 
def test_count_black_listed(self):
+ inspector = mock.Mock(spec=grouputils.GroupInspector)
+ self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
+ return_value=inspector)
+ inspector.member_names.return_value = self.existing
+
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
- resgrp._nested = get_fake_nested_stack(self.existing)
resgrp._name_blacklist = mock.Mock(return_value=set(self.black_listed))
- rcount = resgrp._count_black_listed()
+ rcount = resgrp._count_black_listed(self.existing)
self.assertEqual(self.count, rcount)
diff --git a/heat/tests/openstack/heat/test_restarter.py b/heat/tests/openstack/heat/test_restarter.py
deleted file mode 100644
index 94857b736..000000000
--- a/heat/tests/openstack/heat/test_restarter.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from heat.common import template_format
-from heat.engine.clients.os import nova
-from heat.tests import common
-from heat.tests import utils
-
-
-restarter_template = '''
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "Template to test HARestarter",
- "Parameters" : {},
- "Resources" : {
- "instance": {
- "Type": "OS::Heat::None"
- },
- "restarter": {
- "Type": "OS::Heat::HARestarter",
- "Properties": {
- "InstanceId": {"Ref": "instance"}
- }
- }
- }
-}
-'''
-
-bogus_template = '''
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
- "Description" : "Template to test HARestarter",
- "Parameters" : {},
- "Resources" : {
- "restarter": {
- "Type": "OS::Heat::HARestarter",
- "Properties": {
- "InstanceId": "instance"
- }
- }
- }
-}
-'''
-
-
-class RestarterTest(common.HeatTestCase):
- def create_restarter(self, template=restarter_template):
- snippet = template_format.parse(template)
- self.stack = utils.parse_stack(snippet)
- restarter = self.stack['restarter']
- self.patchobject(nova.NovaClientPlugin, 'get_server',
- return_value=mock.MagicMock())
- restarter.handle_create = mock.Mock(return_value=None)
- self.stack.create()
- return restarter
-
- def test_create(self):
- rsrc = self.create_restarter()
-
- self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- rsrc.handle_create.assert_called_once_with()
-
- def test_handle_signal(self):
- rsrc = self.create_restarter()
-
- with mock.patch.object(rsrc.stack, 'restart_resource') as rr:
- self.assertIsNone(rsrc.handle_signal())
- rr.assert_called_once_with('instance')
-
- def test_handle_signal_alarm(self):
- rsrc = self.create_restarter()
-
- with mock.patch.object(rsrc.stack, 'restart_resource') as rr:
- self.assertIsNone(rsrc.handle_signal({'state': 'Alarm'}))
- rr.assert_called_once_with('instance')
-
- def test_handle_signal_not_alarm(self):
- rsrc = self.create_restarter()
-
- with mock.patch.object(rsrc.stack, 'restart_resource') as rr:
- self.assertIsNone(rsrc.handle_signal({'state': 'spam'}))
- self.assertEqual([], rr.mock_calls)
-
- def test_handle_signal_no_instance(self):
- rsrc = self.create_restarter(bogus_template)
-
- with mock.patch.object(rsrc.stack, 'restart_resource') as rr:
- self.assertIsNone(rsrc.handle_signal())
- self.assertEqual([], rr.mock_calls)
diff --git a/heat/tests/openstack/heat/test_software_deployment.py b/heat/tests/openstack/heat/test_software_deployment.py
index 13b6d83d4..1ee154994 100644
--- a/heat/tests/openstack/heat/test_software_deployment.py
+++ b/heat/tests/openstack/heat/test_software_deployment.py
@@ -1536,132 +1536,121 @@ class SoftwareDeploymentGroupTest(common.HeatTestCase):
self.assertEqual(templ, resg._assemble_nested(['server1',
'server2']).t)
 
- def test_attributes(self):
+ def test_validate(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
- resg = sd.SoftwareDeploymentGroup('test', snip, stack)
- nested = self.patchobject(resg, 'nested')
- server1 = mock.MagicMock()
- server2 = mock.MagicMock()
- nested.return_value = {
- 'server1': server1,
- 'server2': server2
- }
-
- server1.FnGetAtt.return_value = 'Thing happened on server1'
- server2.FnGetAtt.return_value = 'ouch'
- self.assertEqual({
- 'server1': 'Thing happened on server1',
- 'server2': 'ouch'
- }, resg.FnGetAtt('deploy_stdouts'))
-
- server1.FnGetAtt.return_value = ''
- server2.FnGetAtt.return_value = 'Its gone Pete Tong'
- self.assertEqual({
- 'server1': '',
- 'server2': 'Its gone Pete Tong'
- }, resg.FnGetAtt('deploy_stderrs'))
+ resg = sd.SoftwareDeploymentGroup('deploy_mysql', snip, stack)
+ self.assertIsNone(resg.validate())
- server1.FnGetAtt.return_value = 0
- server2.FnGetAtt.return_value = 1
- self.assertEqual({
- 'server1': 0,
- 'server2': 1
- }, resg.FnGetAtt('deploy_status_codes'))
-
- server1.FnGetAtt.assert_has_calls([
- mock.call('deploy_stdout'),
- mock.call('deploy_stderr'),
- mock.call('deploy_status_code'),
- ])
-
- server2.FnGetAtt.assert_has_calls([
- mock.call('deploy_stdout'),
- mock.call('deploy_stderr'),
- mock.call('deploy_status_code'),
- ])
- def test_attributes_path(self):
- stack = utils.parse_stack(self.template)
- snip = stack.t.resource_definitions(stack)['deploy_mysql']
- resg = sd.SoftwareDeploymentGroup('test', snip, stack)
- nested = self.patchobject(resg, 'nested')
- server1 = mock.MagicMock()
- server2 = mock.MagicMock()
- nested.return_value = {
- 'server1': server1,
- 'server2': server2
- }
+class SoftwareDeploymentGroupAttrTest(common.HeatTestCase):
+ scenarios = [
+ ('stdouts', dict(group_attr='deploy_stdouts',
+ nested_attr='deploy_stdout',
+ values=['Thing happened on server1', 'ouch'])),
+ ('stderrs', dict(group_attr='deploy_stderrs',
+ nested_attr='deploy_stderr',
+ values=['', "It's gone Pete Tong"])),
+ ('status_codes', dict(group_attr='deploy_status_codes',
+ nested_attr='deploy_status_code',
+ values=[0, 1])),
+ ('passthrough', dict(group_attr='some_attr',
+ nested_attr='some_attr',
+ values=['attr1', 'attr2'])),
+ ]
- server1.FnGetAtt.return_value = 'Thing happened on server1'
- server2.FnGetAtt.return_value = 'ouch'
- self.assertEqual('Thing happened on server1',
- resg.FnGetAtt('deploy_stdouts', 'server1'))
- self.assertEqual('ouch',
- resg.FnGetAtt('deploy_stdouts', 'server2'))
-
- server1.FnGetAtt.return_value = ''
- server2.FnGetAtt.return_value = 'Its gone Pete Tong'
- self.assertEqual('', resg.FnGetAtt('deploy_stderrs', 'server1'))
- self.assertEqual('Its gone Pete Tong',
- resg.FnGetAtt('deploy_stderrs', 'server2'))
-
- server1.FnGetAtt.return_value = 0
- server2.FnGetAtt.return_value = 1
- self.assertEqual(0, resg.FnGetAtt('deploy_status_codes', 'server1'))
- self.assertEqual(1, resg.FnGetAtt('deploy_status_codes', 'server2'))
-
- server1.FnGetAtt.assert_has_calls([
- mock.call('deploy_stdout'),
- mock.call('deploy_stdout'),
- mock.call('deploy_stderr'),
- mock.call('deploy_stderr'),
- mock.call('deploy_status_code'),
- mock.call('deploy_status_code'),
- ])
-
- server2.FnGetAtt.assert_has_calls([
- mock.call('deploy_stdout'),
- mock.call('deploy_stdout'),
- mock.call('deploy_stderr'),
- mock.call('deploy_stderr'),
- mock.call('deploy_status_code'),
- mock.call('deploy_status_code'),
- ])
-
- def test_attributes_passthrough_key(self):
- '''Prove attributes not in the schema pass-through.'''
- stack = utils.parse_stack(self.template)
- snip = stack.t.resource_definitions(stack)['deploy_mysql']
- resg = sd.SoftwareDeploymentGroup('test', snip, stack)
- nested = self.patchobject(resg, 'nested')
- server1 = mock.MagicMock()
- server2 = mock.MagicMock()
- nested.return_value = {
- 'server1': server1,
- 'server2': server2
+ template = {
+ 'heat_template_version': '2013-05-23',
+ 'resources': {
+ 'deploy_mysql': {
+ 'type': 'OS::Heat::SoftwareDeploymentGroup',
+ 'properties': {
+ 'config': 'config_uuid',
+ 'servers': {'server1': 'uuid1', 'server2': 'uuid2'},
+ 'input_values': {'foo': 'bar'},
+ 'name': '10_config'
+ }
+ }
}
+ }
- server1.FnGetAtt.return_value = 'attr1'
- server2.FnGetAtt.return_value = 'attr2'
- self.assertEqual({
- 'server1': 'attr1',
- 'server2': 'attr2'
- }, resg.FnGetAtt('some_attr'))
+ def setUp(self):
+ super(SoftwareDeploymentGroupAttrTest, self).setUp()
+ self.server_names = ['server1', 'server2']
+ self.servers = [mock.MagicMock() for s in self.server_names]
+ self.stack = utils.parse_stack(self.template)
- server1.FnGetAtt.assert_has_calls([
- mock.call('some_attr'),
- ])
+ def test_attributes(self):
+ resg = self.create_dummy_stack()
+ self.assertEqual(dict(zip(self.server_names, self.values)),
+ resg.FnGetAtt(self.group_attr))
+ self.check_calls()
- server2.FnGetAtt.assert_has_calls([
- mock.call('some_attr'),
- ])
+ def test_attributes_path(self):
+ resg = self.create_dummy_stack()
+ for i, r in enumerate(self.server_names):
+ self.assertEqual(self.values[i],
+ resg.FnGetAtt(self.group_attr, r))
+ self.check_calls(len(self.server_names))
- def test_validate(self):
- stack = utils.parse_stack(self.template)
- snip = stack.t.resource_definitions(stack)['deploy_mysql']
- resg = sd.SoftwareDeploymentGroup('deploy_mysql', snip, stack)
- self.assertIsNone(resg.validate())
+ def create_dummy_stack(self):
+ snip = self.stack.t.resource_definitions(self.stack)['deploy_mysql']
+ resg = sd.SoftwareDeploymentGroup('test', snip, self.stack)
+ resg.resource_id = 'test-test'
+ nested = self.patchobject(resg, 'nested')
+ nested.return_value = dict(zip(self.server_names, self.servers))
+ self._stub_get_attr(resg)
+ return resg
+
+ def _stub_get_attr(self, resg):
+ def ref_id_fn(args):
+ self.fail('Getting member reference ID for some reason')
+
+ def attr_fn(args):
+ res_name = args[0]
+ return self.values[self.server_names.index(res_name)]
+
+ def get_output(output_name):
+ outputs = resg._nested_output_defns(resg._resource_names(),
+ attr_fn, ref_id_fn)
+ op_defns = {od.name: od for od in outputs}
+ self.assertIn(output_name, op_defns)
+ return op_defns[output_name].get_value()
+
+ orig_get_attr = resg.FnGetAtt
+
+ def get_attr(attr_name, *path):
+ if not path:
+ attr = attr_name
+ else:
+ attr = (attr_name,) + path
+ # Mock referenced_attrs() so that _nested_output_defns()
+ # will include the output required for this attribute
+ resg.referenced_attrs = mock.Mock(return_value=[attr])
+
+ # Pass through to actual function under test
+ return orig_get_attr(attr_name, *path)
+
+ resg.FnGetAtt = mock.Mock(side_effect=get_attr)
+ resg.get_output = mock.Mock(side_effect=get_output)
+
+ def check_calls(self, count=1):
+ pass
+
+
+class SoftwareDeploymentGroupAttrFallbackTest(SoftwareDeploymentGroupAttrTest):
+ def _stub_get_attr(self, resg):
+ # Raise NotFound when getting output, to force fallback to old-school
+ # grouputils functions
+ resg.get_output = mock.Mock(side_effect=exc.NotFound)
+
+ for server, value in zip(self.servers, self.values):
+ server.FnGetAtt.return_value = value
+
+ def check_calls(self, count=1):
+ calls = [mock.call(c) for c in [self.nested_attr] * count]
+ for server in self.servers:
+ server.FnGetAtt.assert_has_calls(calls)
class SDGReplaceTest(common.HeatTestCase):
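
[Editor's note] The refactor above collapses three near-identical attribute tests into a scenarios list; HeatTestCase appears to expand these via the testscenarios library, which runs every test method once per scenario with that scenario's attributes set on the instance. A minimal sketch of the mechanism, independent of Heat and assuming the testscenarios and testtools packages are installed:

import testscenarios
import testtools


class GroupAttrScenarios(testscenarios.WithScenarios, testtools.TestCase):
    scenarios = [
        ('stdouts', dict(group_attr='deploy_stdouts', values=['ok', 'ouch'])),
        ('status_codes', dict(group_attr='deploy_status_codes',
                              values=[0, 1])),
    ]

    def test_one_value_per_server(self):
        # Runs once per scenario; group_attr/values differ each time.
        self.assertEqual(2, len(self.values))
        self.assertTrue(self.group_attr.startswith('deploy_'))


# Needed so stock unittest runners expand the scenarios.
load_tests = testscenarios.load_tests_apply_scenarios
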
diff --git a/heat/tests/openstack/heat/test_swiftsignal.py b/heat/tests/openstack/heat/test_swiftsignal.py
index a5bf36d13..b688dea05 100644
--- a/heat/tests/openstack/heat/test_swiftsignal.py
+++ b/heat/tests/openstack/heat/test_swiftsignal.py
@@ -137,7 +137,7 @@ class SwiftSignalHandleTest(common.HeatTestCase):
obj_name = "%s-%s-abcdefghijkl" % (st.name, handle.name)
regexp = ("http://fake-host.com:8080/v1/AUTH_test_tenant/%s/test_st-"
"test_wait_condition_handle-abcdefghijkl"
- "\?temp_url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}"
+ r"\?temp_url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}"
% st.id)
res_id = st.resources['test_wait_condition_handle'].resource_id
self.assertEqual(res_id, handle.physical_resource_name())
@@ -718,7 +718,7 @@ class SwiftSignalTest(common.HeatTestCase):
st.create()
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
expected = ('http://fake-host.com:8080/v1/AUTH_test_tenant/%s/'
- 'test_st-test_wait_condition_handle-abcdefghijkl\?temp_'
+ r'test_st-test_wait_condition_handle-abcdefghijkl\?temp_'
'url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}') % st.id
self.assertThat(handle.FnGetAtt('endpoint'),
matchers.MatchesRegex(expected))
@@ -749,7 +749,7 @@ class SwiftSignalTest(common.HeatTestCase):
self.assertEqual(('CREATE', 'COMPLETE'), st.state)
expected = ("curl -i -X PUT 'http://fake-host.com:8080/v1/"
"AUTH_test_tenant/%s/test_st-test_wait_condition_"
- "handle-abcdefghijkl\?temp_url_sig=[0-9a-f]{40}&"
+ r"handle-abcdefghijkl\?temp_url_sig=[0-9a-f]{40}&"
"temp_url_expires=[0-9]{10}'") % st.id
self.assertThat(handle.FnGetAtt('curl_cli'),
matchers.MatchesRegex(expected))
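
[Editor's note] The swiftsignal changes above only add an r prefix: under Python 3.6+, \? inside a plain string literal is an invalid escape sequence and emits a DeprecationWarning, whereas a raw string passes the backslash through to the regex engine unchanged. A quick illustration:

import re

# Raw string: the backslash reaches re.search, escaping the '?' literal.
pattern = r"\?temp_url_sig=[0-9a-f]{40}&temp_url_expires=[0-9]{10}"
url = "...?temp_url_sig=" + "a" * 40 + "&temp_url_expires=" + "1" * 10
assert re.search(pattern, url)

# A non-raw "\?..." literal currently yields the same regex once parsed,
# but Python warns about the "\?" escape and may reject it in the future.
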
diff --git a/heat/tests/openstack/keystone/test_domain.py b/heat/tests/openstack/keystone/test_domain.py
index bf8cb87c4..1f6e45259 100644
--- a/heat/tests/openstack/keystone/test_domain.py
+++ b/heat/tests/openstack/keystone/test_domain.py
@@ -35,8 +35,6 @@ KEYSTONE_REGION_TEMPLATE = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Domain'
-
class KeystoneDomainTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_endpoint.py b/heat/tests/openstack/keystone/test_endpoint.py
index dc0e4f169..9bcc8b4c6 100644
--- a/heat/tests/openstack/keystone/test_endpoint.py
+++ b/heat/tests/openstack/keystone/test_endpoint.py
@@ -42,8 +42,6 @@ keystone_endpoint_template = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Endpoint'
-
class KeystoneEndpointTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_group.py b/heat/tests/openstack/keystone/test_group.py
index 7d689d858..680382030 100644
--- a/heat/tests/openstack/keystone/test_group.py
+++ b/heat/tests/openstack/keystone/test_group.py
@@ -37,8 +37,6 @@ keystone_group_template = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Group'
-
class KeystoneGroupTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_project.py b/heat/tests/openstack/keystone/test_project.py
index 089021510..9b3c4ef63 100644
--- a/heat/tests/openstack/keystone/test_project.py
+++ b/heat/tests/openstack/keystone/test_project.py
@@ -31,14 +31,13 @@ keystone_project_template = {
'description': 'Test project',
'domain': 'default',
'enabled': 'True',
- 'parent': 'my_father'
+ 'parent': 'my_father',
+ 'tags': ['label', 'insignia']
}
}
}
}
-RESOURCE_TYPE = 'OS::Keystone::Project'
-
class KeystoneProjectTest(common.HeatTestCase):
def setUp(self):
@@ -101,6 +100,9 @@ class KeystoneProjectTest(common.HeatTestCase):
self.assertEqual(
'my_father',
self.test_project.properties.get(project.KeystoneProject.PARENT))
+ self.assertEqual(
+ ['label', 'insignia'],
+ self.test_project.properties.get(project.KeystoneProject.TAGS))
self.test_project.handle_create()
@@ -110,7 +112,8 @@ class KeystoneProjectTest(common.HeatTestCase):
description='Test project',
domain='default',
enabled=True,
- parent='my_father')
+ parent='my_father',
+ tags=['label', 'insignia'])
# validate physical resource id
self.assertEqual(mock_project.id, self.test_project.resource_id)
@@ -249,7 +252,10 @@ class KeystoneProjectTest(common.HeatTestCase):
project.KeystoneProject.ENABLED)),
project.KeystoneProject.PARENT:
(self._get_property_schema_value_default(
- project.KeystoneProject.PARENT))
+ project.KeystoneProject.PARENT)),
+ project.KeystoneProject.TAGS:
+ (self._get_property_schema_value_default(
+ project.KeystoneProject.TAGS))
}
def _side_effect(key):
@@ -289,7 +295,8 @@ class KeystoneProjectTest(common.HeatTestCase):
description='',
domain='default',
enabled=True,
- parent=None)
+ parent=None,
+ tags=[])
def test_project_handle_update(self):
self.test_project.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
@@ -298,7 +305,8 @@ class KeystoneProjectTest(common.HeatTestCase):
project.KeystoneProject.DESCRIPTION:
'Test Project updated',
project.KeystoneProject.ENABLED: False,
- project.KeystoneProject.DOMAIN: 'test_domain'}
+ project.KeystoneProject.DOMAIN: 'test_domain',
+ project.KeystoneProject.TAGS: ['tag1', 'tag2']}
self.test_project.handle_update(json_snippet=None,
tmpl_diff=None,
@@ -309,7 +317,8 @@ class KeystoneProjectTest(common.HeatTestCase):
name=prop_diff[project.KeystoneProject.NAME],
description=prop_diff[project.KeystoneProject.DESCRIPTION],
enabled=prop_diff[project.KeystoneProject.ENABLED],
- domain='test_domain'
+ domain='test_domain',
+ tags=prop_diff[project.KeystoneProject.TAGS]
)
def test_project_handle_update_default(self):
@@ -317,7 +326,8 @@ class KeystoneProjectTest(common.HeatTestCase):
prop_diff = {project.KeystoneProject.DESCRIPTION:
'Test Project updated',
- project.KeystoneProject.ENABLED: False}
+ project.KeystoneProject.ENABLED: False,
+ project.KeystoneProject.TAGS: ['one', 'two']}
self.test_project.handle_update(json_snippet=None,
tmpl_diff=None,
@@ -330,7 +340,8 @@ class KeystoneProjectTest(common.HeatTestCase):
name=None,
description=prop_diff[project.KeystoneProject.DESCRIPTION],
enabled=prop_diff[project.KeystoneProject.ENABLED],
- domain='default'
+ domain='default',
+ tags=prop_diff[project.KeystoneProject.TAGS]
)
def test_project_handle_update_only_enabled(self):
@@ -346,7 +357,8 @@ class KeystoneProjectTest(common.HeatTestCase):
name=None,
description=None,
enabled=prop_diff[project.KeystoneProject.ENABLED],
- domain='default'
+ domain='default',
+ tags=['label', 'insignia']
)
def test_show_resource(self):
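
[Editor's note] The project tests above thread the new tags property through every assertion, including the defaults case built with _side_effect over the property schema. A minimal sketch of that side_effect idiom, where a mocked properties object answers lookups from a dict of schema defaults; the defaults table below is a hypothetical stand-in, not Heat's schema:

import mock

# Hypothetical defaults standing in for the property schema values.
defaults = {'name': None, 'description': '', 'enabled': True,
            'parent': None, 'tags': []}

props = mock.MagicMock()
props.get.side_effect = lambda key: defaults[key]
props.__getitem__.side_effect = lambda key: defaults[key]

assert props.get('tags') == []
assert props['enabled'] is True
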
diff --git a/heat/tests/openstack/keystone/test_region.py b/heat/tests/openstack/keystone/test_region.py
index 81069c875..646802cd5 100644
--- a/heat/tests/openstack/keystone/test_region.py
+++ b/heat/tests/openstack/keystone/test_region.py
@@ -37,8 +37,6 @@ KEYSTONE_REGION_TEMPLATE = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Region'
-
class KeystoneRegionTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_role.py b/heat/tests/openstack/keystone/test_role.py
index 445ef9e4b..df9eda150 100644
--- a/heat/tests/openstack/keystone/test_role.py
+++ b/heat/tests/openstack/keystone/test_role.py
@@ -34,8 +34,6 @@ keystone_role_template = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Role'
-
class KeystoneRoleTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_service.py b/heat/tests/openstack/keystone/test_service.py
index 7eb643f54..ba2a7e0ad 100644
--- a/heat/tests/openstack/keystone/test_service.py
+++ b/heat/tests/openstack/keystone/test_service.py
@@ -39,8 +39,6 @@ keystone_service_template = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::Service'
-
class KeystoneServiceTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/keystone/test_user.py b/heat/tests/openstack/keystone/test_user.py
index a95aa0af4..7f67d5c34 100644
--- a/heat/tests/openstack/keystone/test_user.py
+++ b/heat/tests/openstack/keystone/test_user.py
@@ -41,8 +41,6 @@ keystone_user_template = {
}
}
-RESOURCE_TYPE = 'OS::Keystone::User'
-
class KeystoneUserTest(common.HeatTestCase):
def setUp(self):
diff --git a/heat/tests/openstack/magnum/test_cluster_template.py b/heat/tests/openstack/magnum/test_cluster_template.py
index f54bee7d2..3d9176c00 100644
--- a/heat/tests/openstack/magnum/test_cluster_template.py
+++ b/heat/tests/openstack/magnum/test_cluster_template.py
@@ -13,6 +13,7 @@
import copy
import mock
+from neutronclient.neutron import v2_0 as neutronV20
import six
from heat.common import exception
@@ -66,9 +67,9 @@ class TestMagnumClusterTemplate(common.HeatTestCase):
'flavor_id': 'm1.small',
'master_flavor_id': 'm1.medium',
'keypair_id': 'heat_key',
- 'external_network_id': '0244b54d-ae1f-44f0-a24a-442760f1d681',
- 'fixed_network': '0f59a3dd-fac1-4d03-b41a-d4115fbffa89',
- 'fixed_subnet': '27a8c89c-0d28-4946-8c78-82cfec1d670a',
+ 'external_network_id': 'id_for_net_or_sub',
+ 'fixed_network': 'id_for_net_or_sub',
+ 'fixed_subnet': 'id_for_net_or_sub',
'dns_nameserver': '8.8.8.8',
'docker_volume_size': 5,
'docker_storage_driver': 'devicemapper',
@@ -99,6 +100,9 @@ class TestMagnumClusterTemplate(common.HeatTestCase):
self.client = mock.Mock()
self.patchobject(cluster_template.ClusterTemplate, 'client',
return_value=self.client)
+ self.find_mock = self.patchobject(neutronV20,
+ 'find_resourceid_by_name_or_id')
+ self.find_mock.return_value = 'id_for_net_or_sub'
self.stub_FlavorConstraint_validate()
self.stub_KeypairConstraint_validate()
self.stub_ImageConstraint_validate()
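
[Editor's note] The magnum test now stubs neutronclient's name/ID resolver so that external_network_id, fixed_network and fixed_subnet all resolve to the same canned identifier. A sketch of the same patchobject-style stub, assuming python-neutronclient is importable:

import mock
from neutronclient.neutron import v2_0 as neutronV20

with mock.patch.object(neutronV20, 'find_resourceid_by_name_or_id',
                       return_value='id_for_net_or_sub') as find_mock:
    # Any lookup, by name or UUID, now yields the canned id.
    resolved = neutronV20.find_resourceid_by_name_or_id(
        mock.Mock(), 'network', 'private-net')

assert resolved == 'id_for_net_or_sub'
assert find_mock.call_count == 1
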
diff --git a/heat/tests/openstack/manila/test_share.py b/heat/tests/openstack/manila/test_share.py
index ecf7f0bf5..edaa01a6f 100644
--- a/heat/tests/openstack/manila/test_share.py
+++ b/heat/tests/openstack/manila/test_share.py
@@ -229,7 +229,7 @@ class ManilaShareTest(common.HeatTestCase):
stack = utils.parse_stack(tmp, stack_name='access_type')
self.assertRaisesRegex(
exception.StackValidationFailed,
- ".* \"domain\" is not an allowed value \[ip, user, cert, cephx\]",
+ r'.* "domain" is not an allowed value \[ip, user, cert, cephx\]',
stack.validate)
def test_get_live_state(self):
diff --git a/heat/tests/openstack/mistral/test_workflow.py b/heat/tests/openstack/mistral/test_workflow.py
index 9855f67cb..04336ccca 100644
--- a/heat/tests/openstack/mistral/test_workflow.py
+++ b/heat/tests/openstack/mistral/test_workflow.py
@@ -687,11 +687,11 @@ class TestMistralWorkflow(common.HeatTestCase):
self.patchobject(exec_manager, '_create', return_value=execution)
scheduler.TaskRunner(wf.signal, details)()
call_args = self.mistral.executions.create.call_args
- args, _ = call_args
+ args, kwargs = call_args
expected_args = (
'{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
'"name": "create_test_server", "flavor": "3"}')
- self.validate_json_inputs(args[1], expected_args)
+ self.validate_json_inputs(kwargs['workflow_input'], expected_args)
self.assertEqual({'executions': '12345'}, wf.data())
# Updating the workflow, changing "use_request_body_as_input" to
# false, and signaling again with the expected request body format.
@@ -712,11 +712,11 @@ class TestMistralWorkflow(common.HeatTestCase):
self.patchobject(exec_manager, '_create', return_value=execution)
scheduler.TaskRunner(wf.signal, details)()
call_args = self.mistral.executions.create.call_args
- args, _ = call_args
+ args, kwargs = call_args
expected_args = (
'{"image": "31d8eeaf-686e-4e95-bb27-765014b9f20b", '
'"name": "create_test_server", "flavor": "4"}')
- self.validate_json_inputs(args[1], expected_args)
+ self.validate_json_inputs(kwargs['workflow_input'], expected_args)
self.assertEqual({'executions': '54321,12345', 'name':
'test_stack-workflow-b5fiekdsa355'}, wf.data())
scheduler.TaskRunner(wf.delete)()
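
[Editor's note] The mistral assertions change because the code under test now passes workflow_input as a keyword argument; mock's call_args is an (args, kwargs) pair, so the payload moves from args[1] to kwargs['workflow_input']. In isolation:

import mock

executions = mock.Mock()
executions.create('wf-id', workflow_input='{"flavor": "3"}')

args, kwargs = executions.create.call_args
assert args == ('wf-id',)
assert kwargs['workflow_input'] == '{"flavor": "3"}'
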
diff --git a/heat/tests/openstack/monasca/test_alarm_definition.py b/heat/tests/openstack/monasca/test_alarm_definition.py
index c629b0044..f0ea7927c 100644
--- a/heat/tests/openstack/monasca/test_alarm_definition.py
+++ b/heat/tests/openstack/monasca/test_alarm_definition.py
@@ -41,8 +41,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Monasca::AlarmDefinition'
-
class MonascaAlarmDefinitionTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/monasca/test_notification.py b/heat/tests/openstack/monasca/test_notification.py
index 95de586d7..d82732139 100644
--- a/heat/tests/openstack/monasca/test_notification.py
+++ b/heat/tests/openstack/monasca/test_notification.py
@@ -39,8 +39,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Monasca::Notification'
-
class MonascaNotificationTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/neutron/test_neutron_net.py b/heat/tests/openstack/neutron/test_neutron_net.py
index 7b90a5226..65fbc3f45 100644
--- a/heat/tests/openstack/neutron/test_neutron_net.py
+++ b/heat/tests/openstack/neutron/test_neutron_net.py
@@ -334,7 +334,6 @@ class NeutronNetTest(common.HeatTestCase):
reality = rsrc.get_live_state(rsrc.properties)
expected = {
- 'name': 'net1-net-wkkl2vwupdee',
'admin_state_up': True,
'qos_policy': "some",
'value_specs': {
diff --git a/heat/tests/openstack/neutron/test_neutron_port.py b/heat/tests/openstack/neutron/test_neutron_port.py
index 20a1320ae..423078109 100644
--- a/heat/tests/openstack/neutron/test_neutron_port.py
+++ b/heat/tests/openstack/neutron/test_neutron_port.py
@@ -819,7 +819,6 @@ class NeutronPortTest(common.HeatTestCase):
reality = port.get_live_state(port.properties)
expected = {
- 'name': 'flip-port-xjbal77qope3',
'allowed_address_pairs': [],
'admin_state_up': True,
'device_owner': '',
diff --git a/heat/tests/openstack/neutron/test_neutron_rbac_policy.py b/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
index a806c1621..b67c3b65f 100644
--- a/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
+++ b/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
@@ -57,16 +57,13 @@ class RBACPolicyTest(common.HeatTestCase):
def test_create_qos_policy_rbac(self):
self._test_create(obj_type='qos_policy')
- def _test_validate_invalid_action(self,
+ def _test_validate_invalid_action(self, msg,
invalid_action='invalid',
obj_type='network'):
tpl = yaml.safe_load(inline_templates.RBAC_TEMPLATE)
tpl['resources']['rbac']['properties']['action'] = invalid_action
tpl['resources']['rbac']['properties']['object_type'] = obj_type
self._create_stack(tmpl=yaml.safe_dump(tpl))
- msg = ("Invalid action %(action)s for object type %(type)s." %
- {'action': invalid_action,
- 'type': obj_type})
self.patchobject(type(self.rbac), 'is_service_available',
return_value=(True, None))
@@ -75,21 +72,29 @@ class RBACPolicyTest(common.HeatTestCase):
self.rbac.validate)
def test_validate_action_for_network(self):
- self._test_validate_invalid_action()
+ msg = ('Property error: resources.rbac.properties.action: '
+ '"invalid" is not an allowed value '
+ r'\[access_as_shared, access_as_external\]')
+ self._test_validate_invalid_action(msg)
def test_validate_action_for_qos_policy(self):
- self._test_validate_invalid_action(
- obj_type='qos_policy')
+ msg = ('Property error: resources.rbac.properties.action: '
+ '"invalid" is not an allowed value '
+ r'\[access_as_shared, access_as_external\]')
+ self._test_validate_invalid_action(msg, obj_type='qos_policy')
# we don't support access_as_external for qos_policy
- self._test_validate_invalid_action(
- obj_type='qos_policy',
- invalid_action='access_as_external')
+ msg = ('Property error: resources.rbac.properties.action: '
+ 'Invalid action "access_as_external" for object type '
+ 'qos_policy. Valid actions: access_as_shared')
+ self._test_validate_invalid_action(msg,
+ obj_type='qos_policy',
+ invalid_action='access_as_external')
def test_validate_invalid_type(self):
tpl = yaml.safe_load(inline_templates.RBAC_TEMPLATE)
tpl['resources']['rbac']['properties']['object_type'] = 'networks'
self._create_stack(tmpl=yaml.safe_dump(tpl))
- msg = "Invalid object_type: networks. "
+ msg = '"networks" is not an allowed value'
self.patchobject(type(self.rbac), 'is_service_available',
return_value=(True, None))
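
[Editor's note] The rewritten RBAC expectations pass full property-error messages to assertRaisesRegex, which interprets them as regular expressions; that is why the bracketed allowed-value lists are escaped and written as raw strings. A self-contained illustration with unittest:

import unittest


class AllowedValueMessageTest(unittest.TestCase):
    def test_literal_brackets_must_be_escaped(self):
        msg = r'"invalid" is not an allowed value \[a, b\]'
        with self.assertRaisesRegex(ValueError, msg):
            raise ValueError('"invalid" is not an allowed value [a, b]')


if __name__ == '__main__':
    unittest.main()
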
diff --git a/heat/tests/openstack/neutron/test_neutron_router.py b/heat/tests/openstack/neutron/test_neutron_router.py
index 64ad1a443..5af60081f 100644
--- a/heat/tests/openstack/neutron/test_neutron_router.py
+++ b/heat/tests/openstack/neutron/test_neutron_router.py
@@ -855,7 +855,6 @@ class NeutronRouterTest(common.HeatTestCase):
'network': '1ede231a-0b46-40fc-ab3b-8029446d0d1b',
'enable_snat': True
},
- 'name': 'er-router-naqzmqnzk4ej',
'admin_state_up': True,
'value_specs': {
'test_value_spec': 'spec_value'
diff --git a/heat/tests/openstack/neutron/test_neutron_subnet.py b/heat/tests/openstack/neutron/test_neutron_subnet.py
index da9695478..4804a376f 100644
--- a/heat/tests/openstack/neutron/test_neutron_subnet.py
+++ b/heat/tests/openstack/neutron/test_neutron_subnet.py
@@ -749,7 +749,6 @@ class NeutronSubnetTest(common.HeatTestCase):
reality = rsrc.get_live_state(rsrc.properties)
expected = {
- 'name': 'subnet-subnet-la5usdgifhrd',
'enable_dhcp': True,
'dns_nameservers': [],
'allocation_pools': [{'start': '10.0.0.2', 'end': '10.0.0.126'}],
diff --git a/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py b/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
index 02e723ae8..6018283e9 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
@@ -44,8 +44,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Neutron::FlowClassifier'
-
class FlowClassifierTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_chain.py b/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
index 9c6525fc9..7d29d18c8 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
@@ -38,8 +38,6 @@ port_chain_template = {
}
}
-RESOURCE_TYPE = 'OS::Neutron::PortChain'
-
class PortChainTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_pair.py b/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
index b4636be7d..df8734997 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
@@ -35,8 +35,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Neutron::PortPair'
-
class PortPairTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py b/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
index 2ff4f5fab..bf2cad400 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
@@ -34,8 +34,6 @@ sample_template = {
}
}
-RESOURCE_TYPE = 'OS::Neutron::PortPairGroup'
-
class PortPairGroupTest(common.HeatTestCase):
diff --git a/heat/tests/openstack/nova/fakes.py b/heat/tests/openstack/nova/fakes.py
index 0934dffc8..73825bf5a 100644
--- a/heat/tests/openstack/nova/fakes.py
+++ b/heat/tests/openstack/nova/fakes.py
@@ -287,6 +287,8 @@ class FakeSessionClient(base_client.SessionClient):
assert set(body[action].keys()) == set(['host',
'block_migration',
'disk_over_commit'])
+ elif action == 'forceDelete':
+ assert body is not None
else:
raise AssertionError("Unexpected server action: %s" % action)
return (resp, _body)
diff --git a/heat/tests/openstack/nova/test_floatingip.py b/heat/tests/openstack/nova/test_floatingip.py
index 1cd79ca09..45d9d600a 100644
--- a/heat/tests/openstack/nova/test_floatingip.py
+++ b/heat/tests/openstack/nova/test_floatingip.py
@@ -63,28 +63,27 @@ floating_ip_template_with_assoc = '''
class NovaFloatingIPTest(common.HeatTestCase):
def setUp(self):
super(NovaFloatingIPTest, self).setUp()
- self.novaclient = mock.Mock()
- self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
- self.m.StubOutWithMock(self.novaclient.servers, 'get')
- self.m.StubOutWithMock(neutronclient.Client, 'list_networks')
+ self.novaclient = fakes_nova.FakeClient()
+ self.patchobject(nova.NovaClientPlugin, '_create',
+ return_value=self.novaclient)
self.m.StubOutWithMock(neutronclient.Client,
'create_floatingip')
self.m.StubOutWithMock(neutronclient.Client,
- 'show_floatingip')
- self.m.StubOutWithMock(neutronclient.Client,
'update_floatingip')
self.m.StubOutWithMock(neutronclient.Client,
'delete_floatingip')
- self.m.StubOutWithMock(self.novaclient.servers, 'add_floating_ip')
- self.m.StubOutWithMock(self.novaclient.servers, 'remove_floating_ip')
- self.patchobject(nova.NovaClientPlugin, 'get_server',
- return_value=mock.MagicMock())
- self.patchobject(nova.NovaClientPlugin, 'has_extension',
- return_value=True)
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='eeee')
+ def mock_interface(self, port, ip):
+ class MockIface(object):
+ def __init__(self, port_id, fixed_ip):
+ self.port_id = port_id
+ self.fixed_ips = [{'ip_address': fixed_ip}]
+
+ return MockIface(port, ip)
+
def mock_create_floatingip(self):
neutronclient.Client.create_floatingip({
'floatingip': {'floating_network_id': u'eeee'}
@@ -95,22 +94,28 @@ class NovaFloatingIPTest(common.HeatTestCase):
"floating_ip_address": "11.0.0.1"
}})
- def mock_show_floatingip(self, refid):
- if refid == 'fc68ea2c-b60b-4b4f-bd82-94ec81110766':
- address = '11.0.0.1'
+ def mock_update_floatingip(self,
+ fip='fc68ea2c-b60b-4b4f-bd82-94ec81110766',
+ ex=None, fip_request=None,
+ delete_assc=False):
+ if fip_request:
+ request_body = fip_request
+ elif delete_assc:
+ request_body = {
+ 'floatingip': {
+ 'port_id': None,
+ 'fixed_ip_address': None}}
else:
- address = '11.0.0.2'
- neutronclient.Client.show_floatingip(
- refid,
- ).AndReturn({'floatingip': {
- 'router_id': None,
- 'tenant_id': 'e936e6cd3e0b48dcb9ff853a8f253257',
- 'floating_network_id': 'eeee',
- 'fixed_ip_address': None,
- 'floating_ip_address': address,
- 'port_id': None,
- 'id': 'ffff'
- }})
+ request_body = {
+ 'floatingip': {
+ 'port_id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ 'fixed_ip_address': '1.2.3.4'}}
+ if ex:
+ neutronclient.Client.update_floatingip(
+ fip, request_body).AndRaise(ex)
+ else:
+ neutronclient.Client.update_floatingip(
+ fip, request_body).AndReturn(None)
def mock_delete_floatingip(self):
id = 'fc68ea2c-b60b-4b4f-bd82-94ec81110766'
@@ -127,10 +132,12 @@ class NovaFloatingIPTest(common.HeatTestCase):
self.stack)
def prepare_floating_ip_assoc(self):
- nova.NovaClientPlugin._create().AndReturn(
- self.novaclient)
- self.novaclient.servers.get('67dc62f9-efde-4c8b-94af-013e00f5dc57')
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
+ return_server = self.novaclient.servers.list()[1]
+ self.patchobject(self.novaclient.servers, 'get',
+ return_value=return_server)
+ iface = self.mock_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ '1.2.3.4')
+ self.patchobject(return_server, 'interface_list', return_value=[iface])
template = template_format.parse(floating_ip_template_with_assoc)
self.stack = utils.parse_stack(template)
resource_defns = self.stack.t.resource_definitions(self.stack)
@@ -169,9 +176,7 @@ class NovaFloatingIPTest(common.HeatTestCase):
def test_delete_floating_ip_assoc_successful_if_create_failed(self):
rsrc = self.prepare_floating_ip_assoc()
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1').AndRaise(
- fakes_nova.fake_exception(400))
-
+ self.mock_update_floatingip(fakes_nova.fake_exception(400))
self.m.ReplayAll()
rsrc.validate()
@@ -185,7 +190,7 @@ class NovaFloatingIPTest(common.HeatTestCase):
def test_floating_ip_assoc_create(self):
rsrc = self.prepare_floating_ip_assoc()
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
+ self.mock_update_floatingip()
self.m.ReplayAll()
rsrc.validate()
@@ -200,12 +205,8 @@ class NovaFloatingIPTest(common.HeatTestCase):
def test_floating_ip_assoc_delete(self):
rsrc = self.prepare_floating_ip_assoc()
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
- self.novaclient.servers.get(
- '67dc62f9-efde-4c8b-94af-013e00f5dc57').AndReturn('server')
- self.novaclient.servers.remove_floating_ip('server', '11.0.0.1')
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
-
+ self.mock_update_floatingip()
+ self.mock_update_floatingip(delete_assc=True)
self.m.ReplayAll()
rsrc.validate()
@@ -216,16 +217,11 @@ class NovaFloatingIPTest(common.HeatTestCase):
self.m.VerifyAll()
- def create_delete_assoc_with_exc(self, exc_code):
+ def test_floating_ip_assoc_delete_not_found(self):
rsrc = self.prepare_floating_ip_assoc()
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
- self.novaclient.servers.get(
- "67dc62f9-efde-4c8b-94af-013e00f5dc57").AndReturn("server")
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
- self.novaclient.servers.remove_floating_ip("server",
- "11.0.0.1").AndRaise(
- fakes_nova.fake_exception(exc_code))
-
+ self.mock_update_floatingip()
+ self.mock_update_floatingip(ex=fakes_nova.fake_exception(404),
+ delete_assc=True)
self.m.ReplayAll()
rsrc.validate()
@@ -236,26 +232,28 @@ class NovaFloatingIPTest(common.HeatTestCase):
self.m.VerifyAll()
- def test_floating_ip_assoc_delete_conflict(self):
- self.create_delete_assoc_with_exc(exc_code=409)
-
- def test_floating_ip_assoc_delete_not_found(self):
- self.create_delete_assoc_with_exc(exc_code=404)
-
def test_floating_ip_assoc_update_server_id(self):
rsrc = self.prepare_floating_ip_assoc()
- # for create
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
- # for update
- self.novaclient.servers.get(
- '2146dfbf-ba77-4083-8e86-d052f671ece5').AndReturn('server')
- self.novaclient.servers.add_floating_ip('server', '11.0.0.1')
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
+ self.mock_update_floatingip()
+ fip_request = {'floatingip': {
+ 'fixed_ip_address': '4.5.6.7',
+ 'port_id': 'bbbbb-bbbb-bbbb-bbbbbbbbb'}
+ }
+ self.mock_update_floatingip(fip_request=fip_request)
self.m.ReplayAll()
rsrc.validate()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
+
+ # for update
+ return_server = self.novaclient.servers.list()[2]
+ self.patchobject(self.novaclient.servers, 'get',
+ return_value=return_server)
+ iface = self.mock_interface('bbbbb-bbbb-bbbb-bbbbbbbbb',
+ '4.5.6.7')
+ self.patchobject(return_server, 'interface_list', return_value=[iface])
+
# update with the new server_id
props = copy.deepcopy(rsrc.properties.data)
update_server_id = '2146dfbf-ba77-4083-8e86-d052f671ece5'
@@ -270,17 +268,11 @@ class NovaFloatingIPTest(common.HeatTestCase):
def test_floating_ip_assoc_update_fl_ip(self):
rsrc = self.prepare_floating_ip_assoc()
# for create
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
+ self.mock_update_floatingip()
# mock for deleting the old association
- self.novaclient.servers.get(
- '67dc62f9-efde-4c8b-94af-013e00f5dc57').AndReturn('server')
- self.novaclient.servers.remove_floating_ip('server', '11.0.0.1')
+ self.mock_update_floatingip(delete_assc=True)
# mock for new association
- self.novaclient.servers.get(
- '67dc62f9-efde-4c8b-94af-013e00f5dc57').AndReturn('server')
- self.novaclient.servers.add_floating_ip('server', '11.0.0.2')
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
- self.mock_show_floatingip('fc68ea2c-cccc-4b4f-bd82-94ec81110766')
+ self.mock_update_floatingip(fip='fc68ea2c-dddd-4b4f-bd82-94ec81110766')
self.m.ReplayAll()
rsrc.validate()
@@ -288,7 +280,7 @@ class NovaFloatingIPTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
# update with the new floatingip
props = copy.deepcopy(rsrc.properties.data)
- props['floating_ip'] = 'fc68ea2c-cccc-4b4f-bd82-94ec81110766'
+ props['floating_ip'] = 'fc68ea2c-dddd-4b4f-bd82-94ec81110766'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
scheduler.TaskRunner(rsrc.update, update_snippet)()
@@ -299,28 +291,33 @@ class NovaFloatingIPTest(common.HeatTestCase):
def test_floating_ip_assoc_update_both(self):
rsrc = self.prepare_floating_ip_assoc()
# for create
- self.novaclient.servers.add_floating_ip(None, '11.0.0.1')
+ self.mock_update_floatingip()
# mock for deleting the old association
- self.novaclient.servers.get(
- '67dc62f9-efde-4c8b-94af-013e00f5dc57').AndReturn('server')
- self.novaclient.servers.remove_floating_ip('server', '11.0.0.1')
+ self.mock_update_floatingip(delete_assc=True)
# mock for new association
- self.novaclient.servers.get(
- '2146dfbf-ba77-4083-8e86-d052f671ece5').AndReturn('new_server')
- self.novaclient.servers.add_floating_ip('new_server', '11.0.0.2')
- self.mock_show_floatingip('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
- self.mock_show_floatingip('fc68ea2c-cccc-4b4f-bd82-94ec81110766')
-
+ fip_request = {'floatingip': {
+ 'fixed_ip_address': '4.5.6.7',
+ 'port_id': 'bbbbb-bbbb-bbbb-bbbbbbbbb'}
+ }
+ self.mock_update_floatingip(fip='fc68ea2c-dddd-4b4f-bd82-94ec81110766',
+ fip_request=fip_request)
self.m.ReplayAll()
rsrc.validate()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
- # update with the new floatingip
+ # update with the new floatingip and server
+ return_server = self.novaclient.servers.list()[2]
+ self.patchobject(self.novaclient.servers, 'get',
+ return_value=return_server)
+ iface = self.mock_interface('bbbbb-bbbb-bbbb-bbbbbbbbb',
+ '4.5.6.7')
+ self.patchobject(return_server, 'interface_list', return_value=[iface])
+
props = copy.deepcopy(rsrc.properties.data)
update_server_id = '2146dfbf-ba77-4083-8e86-d052f671ece5'
props['server_id'] = update_server_id
- props['floating_ip'] = 'fc68ea2c-cccc-4b4f-bd82-94ec81110766'
+ props['floating_ip'] = 'fc68ea2c-dddd-4b4f-bd82-94ec81110766'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(),
props)
scheduler.TaskRunner(rsrc.update, update_snippet)()
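
[Editor's note] The floating-IP tests above move the Nova side of the setup from mox (StubOutWithMock plus ReplayAll) to mock's patchobject with a FakeClient, while the Neutron calls keep their recorded mox expectations for now. The direction of travel, sketched outside Heat with a stand-in Plugin class:

import mock


class Plugin(object):
    # Stand-in for nova.NovaClientPlugin; not the real class.
    def _create(self):
        raise RuntimeError('would talk to Nova')


fake_client = mock.Mock(name='FakeClient')
with mock.patch.object(Plugin, '_create', return_value=fake_client):
    # Every instance now gets the fake; no record/replay step is needed,
    # and calls can be asserted after the fact instead of up front.
    assert Plugin()._create() is fake_client
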
diff --git a/heat/tests/openstack/nova/test_server.py b/heat/tests/openstack/nova/test_server.py
index f09d32f06..8909d3442 100644
--- a/heat/tests/openstack/nova/test_server.py
+++ b/heat/tests/openstack/nova/test_server.py
@@ -123,7 +123,7 @@ resources:
image: F17-x86_64-gold
flavor: m1.large
networks:
- - { uuid: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' }
+ - { network: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' }
subnet:
type: OS::Neutron::Subnet
properties:
@@ -134,7 +134,7 @@ resources:
network: 'bbccbbcc-bbcc-bbcc-bbcc-bbccbbccbbcc'
'''
-mult_subnet_template = '''
+multi_subnet_template = '''
heat_template_version: 2013-05-23
resources:
server:
@@ -218,6 +218,17 @@ resources:
"""
+def create_fake_iface(port=None, net=None, mac=None, ip=None, subnet=None):
+ class fake_interface(object):
+ def __init__(self, port_id, net_id, mac_addr, fixed_ip, subnet_id):
+ self.port_id = port_id
+ self.net_id = net_id
+ self.mac_addr = mac_addr
+ self.fixed_ips = [{'ip_address': fixed_ip, 'subnet_id': subnet_id}]
+
+ return fake_interface(port, net, mac, ip, subnet)
+
+
class ServersTest(common.HeatTestCase):
def setUp(self):
super(ServersTest, self).setUp()
@@ -356,15 +367,6 @@ class ServersTest(common.HeatTestCase):
scheduler.TaskRunner(server.create)()
return server
- def _create_fake_iface(self, port, mac, ip):
- class fake_interface(object):
- def __init__(self, port_id, mac_addr, fixed_ip):
- self.port_id = port_id
- self.mac_addr = mac_addr
- self.fixed_ips = [{'ip_address': fixed_ip}]
-
- return fake_interface(port, mac, ip)
-
def test_subnet_dependency_by_network_id(self):
templ, stack = self._setup_test_stack('subnet-test',
subnet_template)
@@ -381,7 +383,7 @@ class ServersTest(common.HeatTestCase):
# The use case here is creating a network + subnets + server
# from within one stack
templ, stack = self._setup_test_stack('subnet-test',
- mult_subnet_template)
+ multi_subnet_template)
server_rsrc = stack['server']
subnet1_rsrc = stack['subnet1']
subnet2_rsrc = stack['subnet2']
@@ -414,11 +416,15 @@ class ServersTest(common.HeatTestCase):
# this makes sure the auto increment worked on server creation
self.assertGreater(server.id, 0)
- interfaces = [
- self._create_fake_iface('1234', 'fa:16:3e:8c:22:aa', '4.5.6.7'),
- self._create_fake_iface('5678', 'fa:16:3e:8c:33:bb', '5.6.9.8'),
- self._create_fake_iface(
- '1013', 'fa:16:3e:8c:44:cc', '10.13.12.13')]
+ interfaces = [create_fake_iface(port='1234',
+ mac='fa:16:3e:8c:22:aa',
+ ip='4.5.6.7'),
+ create_fake_iface(port='5678',
+ mac='fa:16:3e:8c:33:bb',
+ ip='5.6.9.8'),
+ create_fake_iface(port='1013',
+ mac='fa:16:3e:8c:44:cc',
+ ip='10.13.12.13')]
self.patchobject(self.fc.servers, 'get', return_value=return_server)
self.patchobject(return_server, 'interface_list',
@@ -555,11 +561,15 @@ class ServersTest(common.HeatTestCase):
override_name=True)
server.resource_id = '1234'
- interfaces = [
- self._create_fake_iface('1234', 'fa:16:3e:8c:22:aa', '4.5.6.7'),
- self._create_fake_iface('5678', 'fa:16:3e:8c:33:bb', '5.6.9.8'),
- self._create_fake_iface(
- '1013', 'fa:16:3e:8c:44:cc', '10.13.12.13')]
+ interfaces = [create_fake_iface(port='1234',
+ mac='fa:16:3e:8c:22:aa',
+ ip='4.5.6.7'),
+ create_fake_iface(port='5678',
+ mac='fa:16:3e:8c:33:bb',
+ ip='5.6.9.8'),
+ create_fake_iface(port='1013',
+ mac='fa:16:3e:8c:44:cc',
+ ip='10.13.12.13')]
self.patchobject(self.fc.servers, 'get', return_value=return_server)
self.patchobject(return_server, 'interface_list',
@@ -1949,28 +1959,20 @@ class ServersTest(common.HeatTestCase):
server.properties.data['networks'] = [{'network': 'public_id',
'fixed_ip': '5.6.9.8'}]
- class fake_interface(object):
- def __init__(self, port_id, net_id, fixed_ip, mac_addr):
- self.port_id = port_id
- self.net_id = net_id
- self.mac_addr = mac_addr
-
- self.fixed_ips = [{'ip_address': fixed_ip}]
-
- iface = fake_interface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- 'public',
- '5.6.9.8',
- 'fa:16:3e:8c:33:aa')
- iface1 = fake_interface('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- 'public',
- '4.5.6.7',
- 'fa:16:3e:8c:22:aa')
- iface2 = fake_interface('cccccccc-cccc-cccc-cccc-cccccccccccc',
- 'private',
- '10.13.12.13',
- 'fa:16:3e:8c:44:cc')
+ iface0 = create_fake_iface(port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='public',
+ ip='5.6.9.8',
+ mac='fa:16:3e:8c:33:aa')
+ iface1 = create_fake_iface(port='bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ net='public',
+ ip='4.5.6.7',
+ mac='fa:16:3e:8c:22:aa')
+ iface2 = create_fake_iface(port='cccccccc-cccc-cccc-cccc-cccccccccccc',
+ net='private',
+ ip='10.13.12.13',
+ mac='fa:16:3e:8c:44:cc')
self.patchobject(return_server, 'interface_list',
- return_value=[iface, iface1, iface2])
+ return_value=[iface0, iface1, iface2])
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
@@ -3260,15 +3262,6 @@ class ServersTest(common.HeatTestCase):
'allocate_network': str_network,
'tag': tag}
- def create_fake_iface(self, port, net, ip):
- class fake_interface(object):
- def __init__(self, port_id, net_id, fixed_ip):
- self.port_id = port_id
- self.net_id = net_id
- self.fixed_ips = [{'ip_address': fixed_ip}]
-
- return fake_interface(port, net, ip)
-
def test_get_network_id_neutron(self):
return_server = self.fc.servers.list()[3]
server = self._create_test_server(return_server, 'networks_update')
@@ -3292,8 +3285,10 @@ class ServersTest(common.HeatTestCase):
return_server = self.fc.servers.list()[3]
server = self._create_test_server(return_server, 'networks_update')
- for new_nets in ([],
- [{'port': '952fd4ae-53b9-4b39-9e5f-8929c553b5ae'}]):
+ for new_nets in (
+ [],
+ [{'port': '952fd4ae-53b9-4b39-9e5f-8929c553b5ae',
+ 'network': '450abbc9-9b6d-4d6f-8c3a-c47ac34100dd'}]):
old_nets = [
self.create_old_net(
@@ -3302,20 +3297,48 @@ class ServersTest(common.HeatTestCase):
net='f3ef5d2f-d7ba-4b27-af66-58ca0b81e032', ip='1.2.3.4'),
self.create_old_net(
net='0da8adbf-a7e2-4c59-a511-96b03d2da0d7')]
-
- new_nets_copy = copy.deepcopy(new_nets)
- old_nets_copy = copy.deepcopy(old_nets)
- for net in new_nets_copy:
+ interfaces = [
+ create_fake_iface(
+ port='2a60cbaa-3d33-4af6-a9ce-83594ac546fc',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100aa',
+ ip='4.3.2.1',
+ subnet='subnetsu-bnet-subn-etsu-bnetsubnetsu'),
+ create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='f3ef5d2f-d7ba-4b27-af66-58ca0b81e032',
+ ip='1.2.3.4',
+ subnet='subnetsu-bnet-subn-etsu-bnetsubnetsu'),
+ create_fake_iface(
+ port='bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ net='0da8adbf-a7e2-4c59-a511-96b03d2da0d7',
+ ip='4.2.3.1',
+ subnet='subnetsu-bnet-subn-etsu-bnetsubnetsu')]
+
+ new_nets_cpy = copy.deepcopy(new_nets)
+ old_nets_cpy = copy.deepcopy(old_nets)
+ # Add the values to old_nets_cpy that get populated into old_nets
+ # when update_networks_matching_iface_port() runs inside
+ # _exclude_not_updated_networks()
+ old_nets_cpy[0]['fixed_ip'] = '4.3.2.1'
+ old_nets_cpy[0]['network'] = '450abbc9-9b6d-4d6f-8c3a-c47ac34100aa'
+ old_nets_cpy[0]['subnet'] = 'subnetsu-bnet-subn-etsu-bnetsubnetsu'
+ old_nets_cpy[1]['fixed_ip'] = '1.2.3.4'
+ old_nets_cpy[1]['port'] = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ old_nets_cpy[1]['subnet'] = 'subnetsu-bnet-subn-etsu-bnetsubnetsu'
+ old_nets_cpy[2]['fixed_ip'] = '4.2.3.1'
+ old_nets_cpy[2]['port'] = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+ old_nets_cpy[2]['subnet'] = 'subnetsu-bnet-subn-etsu-bnetsubnetsu'
+
+ for net in new_nets_cpy:
for key in ('port', 'network', 'fixed_ip', 'uuid', 'subnet',
'port_extra_properties', 'floating_ip',
'allocate_network', 'tag'):
net.setdefault(key)
- matched_nets = server._exclude_not_updated_networks(old_nets,
- new_nets)
- self.assertEqual([], matched_nets)
- self.assertEqual(old_nets_copy, old_nets)
- self.assertEqual(new_nets_copy, new_nets)
+ server._exclude_not_updated_networks(old_nets, new_nets,
+ interfaces)
+ self.assertEqual(old_nets_cpy, old_nets)
+ self.assertEqual(new_nets_cpy, new_nets)
def test_exclude_not_updated_networks_success(self):
return_server = self.fc.servers.list()[3]
@@ -3334,17 +3357,32 @@ class ServersTest(common.HeatTestCase):
{'network': 'f3ef5d2f-d7ba-4b27-af66-58ca0b81e032',
'fixed_ip': '1.2.3.4'},
{'port': '952fd4ae-53b9-4b39-9e5f-8929c553b5ae'}]
+ interfaces = [
+ create_fake_iface(port='2a60cbaa-3d33-4af6-a9ce-83594ac546fc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='3.4.5.6'),
+ create_fake_iface(port='bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ net='f3ef5d2f-d7ba-4b27-af66-58ca0b81e032',
+ ip='1.2.3.4'),
+ create_fake_iface(port='cccccccc-cccc-cccc-cccc-cccccccccccc',
+ net='0da8adbf-a7e2-4c59-a511-96b03d2da0d7',
+ ip='2.3.4.5')]
new_nets_copy = copy.deepcopy(new_nets)
old_nets_copy = copy.deepcopy(old_nets)
+ # Add the values to old_nets_copy that get populated into old_nets
+ # when update_networks_matching_iface_port() runs inside
+ # _exclude_not_updated_networks()
+ old_nets_copy[2]['fixed_ip'] = '2.3.4.5'
+ old_nets_copy[2]['port'] = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
+
for net in new_nets_copy:
for key in ('port', 'network', 'fixed_ip', 'uuid', 'subnet',
'port_extra_properties', 'floating_ip',
'allocate_network', 'tag'):
net.setdefault(key)
- matched_nets = server._exclude_not_updated_networks(old_nets, new_nets)
- self.assertEqual(old_nets_copy[:-1], matched_nets)
+ server._exclude_not_updated_networks(old_nets, new_nets, interfaces)
self.assertEqual([old_nets_copy[2]], old_nets)
self.assertEqual([new_nets_copy[2]], new_nets)
@@ -3367,10 +3405,12 @@ class ServersTest(common.HeatTestCase):
'floating_ip': None,
'allocate_network': None,
'tag': None}]
- new_nets_copy = copy.deepcopy(new_nets)
+ interfaces = [
+ create_fake_iface(port='',
+ net='f3ef5d2f-d7ba-4b27-af66-58ca0b81e032',
+ ip='')]
- matched_nets = server._exclude_not_updated_networks(old_nets, new_nets)
- self.assertEqual(new_nets_copy, matched_nets)
+ server._exclude_not_updated_networks(old_nets, new_nets, interfaces)
self.assertEqual([], old_nets)
self.assertEqual([], new_nets)
@@ -3378,34 +3418,49 @@ class ServersTest(common.HeatTestCase):
return_server = self.fc.servers.list()[3]
server = self._create_test_server(return_server, 'networks_update')
- # old order 0 1 2 3 4
+ # old order 0 1 2 3 4 5 6
nets = [
self.create_old_net(port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'),
self.create_old_net(net='gggggggg-1111-1111-1111-gggggggggggg',
ip='1.2.3.4'),
self.create_old_net(net='gggggggg-1111-1111-1111-gggggggggggg'),
self.create_old_net(port='dddddddd-dddd-dddd-dddd-dddddddddddd'),
- self.create_old_net(uuid='gggggggg-1111-1111-1111-gggggggggggg',
- ip='5.6.7.8')]
- # new order 2 3 0 1 4
+ self.create_old_net(net='gggggggg-1111-1111-1111-gggggggggggg',
+ ip='5.6.7.8'),
+ self.create_old_net(net='gggggggg-1111-1111-1111-gggggggggggg',
+ subnet='hhhhhhhh-1111-1111-1111-hhhhhhhhhhhh'),
+ self.create_old_net(subnet='iiiiiiii-1111-1111-1111-iiiiiiiiiiii')]
+ # new order 2 3 0 1 4 6 5
interfaces = [
- self.create_fake_iface('cccccccc-cccc-cccc-cccc-cccccccccccc',
- nets[2]['network'], '10.0.0.11'),
- self.create_fake_iface(nets[3]['port'],
- 'gggggggg-1111-1111-1111-gggggggggggg',
- '10.0.0.12'),
- self.create_fake_iface(nets[0]['port'],
- 'gggggggg-1111-1111-1111-gggggggggggg',
- '10.0.0.13'),
- self.create_fake_iface('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- nets[1]['network'], nets[1]['fixed_ip']),
- self.create_fake_iface('eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee',
- nets[4]['network'], nets[4]['fixed_ip'])]
+ create_fake_iface(port='cccccccc-cccc-cccc-cccc-cccccccccccc',
+ net=nets[2]['network'],
+ ip='10.0.0.11'),
+ create_fake_iface(port=nets[3]['port'],
+ net='gggggggg-1111-1111-1111-gggggggggggg',
+ ip='10.0.0.12'),
+ create_fake_iface(port=nets[0]['port'],
+ net='gggggggg-1111-1111-1111-gggggggggggg',
+ ip='10.0.0.13'),
+ create_fake_iface(port='bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ net=nets[1]['network'],
+ ip=nets[1]['fixed_ip']),
+ create_fake_iface(port='eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee',
+ net=nets[4]['network'],
+ ip=nets[4]['fixed_ip']),
+ create_fake_iface(port='gggggggg-gggg-gggg-gggg-gggggggggggg',
+ net='gggggggg-1111-1111-1111-gggggggggggg',
+ ip='10.0.0.14',
+ subnet=nets[6]['subnet']),
+ create_fake_iface(port='ffffffff-ffff-ffff-ffff-ffffffffffff',
+ net=nets[5]['network'],
+ ip='10.0.0.15',
+ subnet=nets[5]['subnet'])]
+
# all networks should get port id
expected = [
{'port': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- 'network': None,
- 'fixed_ip': None,
+ 'network': 'gggggggg-1111-1111-1111-gggggggggggg',
+ 'fixed_ip': '10.0.0.13',
'subnet': None,
'floating_ip': None,
'port_extra_properties': None,
@@ -3423,7 +3478,7 @@ class ServersTest(common.HeatTestCase):
'tag': None},
{'port': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
'network': 'gggggggg-1111-1111-1111-gggggggggggg',
- 'fixed_ip': None,
+ 'fixed_ip': '10.0.0.11',
'subnet': None,
'port_extra_properties': None,
'floating_ip': None,
@@ -3431,8 +3486,8 @@ class ServersTest(common.HeatTestCase):
'allocate_network': None,
'tag': None},
{'port': 'dddddddd-dddd-dddd-dddd-dddddddddddd',
- 'network': None,
- 'fixed_ip': None,
+ 'network': 'gggggggg-1111-1111-1111-gggggggggggg',
+ 'fixed_ip': '10.0.0.12',
'subnet': None,
'port_extra_properties': None,
'floating_ip': None,
@@ -3440,18 +3495,39 @@ class ServersTest(common.HeatTestCase):
'allocate_network': None,
'tag': None},
{'port': 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee',
- 'uuid': 'gggggggg-1111-1111-1111-gggggggggggg',
+ 'uuid': None,
'fixed_ip': '5.6.7.8',
'subnet': None,
'port_extra_properties': None,
'floating_ip': None,
- 'network': None,
+ 'network': 'gggggggg-1111-1111-1111-gggggggggggg',
+ 'allocate_network': None,
+ 'tag': None},
+ {'port': 'ffffffff-ffff-ffff-ffff-ffffffffffff',
+ 'uuid': None,
+ 'fixed_ip': '10.0.0.15',
+ 'subnet': 'hhhhhhhh-1111-1111-1111-hhhhhhhhhhhh',
+ 'port_extra_properties': None,
+ 'floating_ip': None,
+ 'network': 'gggggggg-1111-1111-1111-gggggggggggg',
+ 'allocate_network': None,
+ 'tag': None},
+ {'port': 'gggggggg-gggg-gggg-gggg-gggggggggggg',
+ 'uuid': None,
+ 'fixed_ip': '10.0.0.14',
+ 'subnet': 'iiiiiiii-1111-1111-1111-iiiiiiiiiiii',
+ 'port_extra_properties': None,
+ 'floating_ip': None,
+ 'network': 'gggggggg-1111-1111-1111-gggggggggggg',
'allocate_network': None,
'tag': None}]
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='gggggggg-1111-1111-1111-gggggggggggg')
+ self.patchobject(neutron.NeutronClientPlugin,
+ 'network_id_from_subnet_id',
+ return_value='gggggggg-1111-1111-1111-gggggggggggg')
server.update_networks_matching_iface_port(nets, interfaces)
self.assertEqual(expected, nets)
@@ -3468,9 +3544,10 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3505,9 +3582,10 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3555,9 +3633,11 @@ class ServersTest(common.HeatTestCase):
mock_create_port = self.patchobject(
neutronclient.Client, 'create_port')
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '05d8e681-4b37-4570-bc8d-810089f706b2',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='05d8e681-4b37-4570-bc8d-810089f706b2',
+ subnet='aaa09d50-8c23-4498-a542-aa0deb24f73e',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3611,9 +3691,11 @@ class ServersTest(common.HeatTestCase):
'network_id_from_subnet_id',
return_value='05d8e681-4b37-4570-bc8d-810089f706b2')
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '05d8e681-4b37-4570-bc8d-810089f706b2',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='05d8e681-4b37-4570-bc8d-810089f706b2',
+ subnet='aaa09d50-8c23-4498-a542-aa0deb24f73e',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3662,9 +3744,10 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3692,9 +3775,10 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- iface = self.create_fake_iface('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
- '1.2.3.4')
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ ip='1.2.3.4')
self.patchobject(return_server, 'interface_list', return_value=[iface])
mock_detach = self.patchobject(return_server, 'interface_detach')
mock_attach = self.patchobject(return_server, 'interface_attach')
@@ -3727,10 +3811,9 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
poor_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14')
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14')]
self.patchobject(return_server, 'interface_list',
return_value=poor_interfaces)
@@ -3786,16 +3869,15 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
port_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14'),
- self.create_fake_iface('4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '21.22.23.24'),
- self.create_fake_iface('0907fa82-a024-43c2-9fc5-efa1bccaa74a',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '31.32.33.34')
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14'),
+ create_fake_iface(port='4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24'),
+ create_fake_iface(port='0907fa82-a024-43c2-9fc5-efa1bccaa74a',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='31.32.33.34')]
self.patchobject(return_server, 'interface_list',
return_value=port_interfaces)
@@ -3835,19 +3917,18 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
poor_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14'),
- self.create_fake_iface('450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '1.2.3.4'),
- self.create_fake_iface('4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '21.22.23.24'),
- self.create_fake_iface('0907fa82-a024-43c2-9fc5-efa1bccaa74a',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '31.32.33.34')
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14'),
+ create_fake_iface(port='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='1.2.3.4'),
+ create_fake_iface(port='4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24'),
+ create_fake_iface(port='0907fa82-a024-43c2-9fc5-efa1bccaa74a',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='31.32.33.34')]
self.patchobject(return_server, 'interface_list',
return_value=poor_interfaces)
@@ -3883,16 +3964,15 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
poor_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14'),
- self.create_fake_iface('4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '21.22.23.24'),
- self.create_fake_iface('0907fa82-a024-43c2-9fc5-efa1bccaa74a',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '31.32.33.34')
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14'),
+ create_fake_iface(port='4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24'),
+ create_fake_iface(port='0907fa82-a024-43c2-9fc5-efa1bccaa74a',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='31.32.33.34')]
self.patchobject(return_server, 'interface_list',
return_value=poor_interfaces)
mock_detach = self.patchobject(return_server, 'interface_detach')
@@ -3927,16 +4007,15 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
poor_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14'),
- self.create_fake_iface('4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '21.22.23.24'),
- self.create_fake_iface('0907fa82-a024-43c2-9fc5-efa1bccaa74a',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '31.32.33.34')
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14'),
+ create_fake_iface(port='4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24'),
+ create_fake_iface(port='0907fa82-a024-43c2-9fc5-efa1bccaa74a',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='31.32.33.34')]
self.patchobject(return_server, 'interface_list',
return_value=poor_interfaces)
@@ -3973,13 +4052,12 @@ class ServersTest(common.HeatTestCase):
self.patchobject(self.fc.servers, 'get', return_value=return_server)
poor_interfaces = [
- self.create_fake_iface('95e25541-d26a-478d-8f36-ae1c8f6b74dc',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '11.12.13.14'),
- self.create_fake_iface('4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '21.22.23.24'),
- ]
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='11.12.13.14'),
+ create_fake_iface(port='4121f61a-1b2e-4ab0-901e-eade9b1cb09d',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24')]
self.patchobject(return_server, 'interface_list',
return_value=poor_interfaces)
@@ -4586,10 +4664,10 @@ class ServerInternalPortTest(ServersTest):
self.port_create.return_value = {'port': {'id': '7788'}}
data_set = self.patchobject(resource.Resource, 'data_set')
- old_net = [{'network': '4321',
- 'subnet': '1234',
- 'fixed_ip': '127.0.0.1'},
- {'port': '3344'}]
+ old_net = [self.create_old_net(net='4321',
+ subnet='1234',
+ ip='127.0.0.1'),
+ self.create_old_net(port='3344')]
new_net = [{'port': '3344'},
{'port': '5566'},
@@ -4597,10 +4675,11 @@ class ServerInternalPortTest(ServersTest):
'subnet': '5678',
'fixed_ip': '10.0.0.1'}
]
- interfaces = [
- self.create_fake_iface('1122', '4321', '127.0.0.1'),
- self.create_fake_iface('3344', '4321', '10.0.0.2'),
- ]
+
+ interfaces = [create_fake_iface(port='1122', net='4321',
+ ip='127.0.0.1', subnet='1234'),
+ create_fake_iface(port='3344', net='4321', ip='10.0.0.2',
+ subnet='subnet')]
server.calculate_networks(old_net, new_net, interfaces)
@@ -4657,16 +4736,17 @@ class ServerInternalPortTest(ServersTest):
'11910',
'1199'])
- old_net = [{'network': '4321',
- 'subnet': '1234',
- 'fixed_ip': '127.0.0.1',
- 'port': '1122',
- 'floating_ip': '1199'},
- {'network': '8765',
- 'subnet': '5678',
- 'fixed_ip': '127.0.0.2',
- 'port': '3344',
- 'floating_ip': '9911'}]
+ old_net = [
+ self.create_old_net(net='4321', subnet='1234', ip='127.0.0.1',
+ port='1122', floating_ip='1199'),
+ self.create_old_net(net='8765', subnet='5678', ip='127.0.0.2',
+ port='3344', floating_ip='9911')
+ ]
+
+ interfaces = [create_fake_iface(port='1122', net='4321',
+ ip='127.0.0.1', subnet='1234'),
+ create_fake_iface(port='3344', net='8765',
+ ip='127.0.0.2', subnet='5678')]
new_net = [{'network': '8765',
'subnet': '5678',
@@ -4679,7 +4759,7 @@ class ServerInternalPortTest(ServersTest):
'floating_ip': '1199',
'port': '1122'}]
- server.calculate_networks(old_net, new_net, [])
+ server.calculate_networks(old_net, new_net, interfaces)
fipa.assert_has_calls((
mock.call('1199', {'floatingip': {'port_id': None}}),
@@ -4811,6 +4891,8 @@ class ServerInternalPortTest(ServersTest):
server._data = {"internal_ports": jsonutils.dumps(port_ids)}
self.patchobject(nova.NovaClientPlugin, 'interface_detach')
self.patchobject(nova.NovaClientPlugin, 'fetch_server')
+ self.patchobject(nova.NovaClientPlugin.check_interface_detach.retry,
+ 'sleep')
nova.NovaClientPlugin.fetch_server.side_effect = [Fake()] * 10
exc = self.assertRaises(exception.InterfaceDetachFailed,
@@ -4938,6 +5020,8 @@ class ServerInternalPortTest(ServersTest):
return_value=True)
self.patchobject(nova.NovaClientPlugin, 'interface_attach')
self.patchobject(nova.NovaClientPlugin, 'fetch_server')
+ self.patchobject(nova.NovaClientPlugin.check_interface_attach.retry,
+ 'sleep')
# need to mock 11 times: 1 for port 1122, 10 for port 3344
nova.NovaClientPlugin.fetch_server.side_effect = [Fake()] * 11
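The new `check_interface_detach.retry`/`check_interface_attach.retry` patches replace the retry helper's `sleep` so the ten polling attempts run instantly under test. A minimal sketch of the mechanism, assuming a tenacity-style decorator (tenacity exposes its retry controller as a `.retry` attribute on the wrapped function):

    import unittest.mock as mock

    import tenacity

    results = iter([False, False, True])

    @tenacity.retry(stop=tenacity.stop_after_attempt(10),
                    retry=tenacity.retry_if_result(lambda r: r is False),
                    wait=tenacity.wait_fixed(0.5))
    def check_detached():
        # Stand-in for polling the server's interface list.
        return next(results)

    # Patching the controller's sleep makes all three attempts immediate.
    with mock.patch.object(check_detached.retry, 'sleep'):
        assert check_detached() is True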
diff --git a/contrib/rackspace/rackspace/resources/__init__.py b/heat/tests/openstack/octavia/__init__.py
index e69de29bb..e69de29bb 100644
--- a/contrib/rackspace/rackspace/resources/__init__.py
+++ b/heat/tests/openstack/octavia/__init__.py
diff --git a/heat/tests/openstack/octavia/inline_templates.py b/heat/tests/openstack/octavia/inline_templates.py
new file mode 100644
index 000000000..9fc65b343
--- /dev/null
+++ b/heat/tests/openstack/octavia/inline_templates.py
@@ -0,0 +1,133 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+LB_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Create a loadbalancer
+resources:
+ lb:
+ type: OS::Octavia::LoadBalancer
+ properties:
+ name: my_lb
+ description: my loadbalancer
+ vip_address: 10.0.0.4
+ vip_subnet: sub123
+ provider: octavia
+ tenant_id: 1234
+ admin_state_up: True
+'''
+
+LISTENER_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Create a listener
+resources:
+ listener:
+ type: OS::Octavia::Listener
+ properties:
+ protocol_port: 80
+ protocol: TCP
+ loadbalancer: 123
+ default_pool: my_pool
+ name: my_listener
+ description: my listener
+ admin_state_up: True
+ default_tls_container_ref: ref
+ sni_container_refs:
+ - ref1
+ - ref2
+ connection_limit: -1
+ tenant_id: 1234
+'''
+
+POOL_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Create a pool
+resources:
+ pool:
+ type: OS::Octavia::Pool
+ properties:
+ name: my_pool
+ description: my pool
+ session_persistence:
+ type: HTTP_COOKIE
+ lb_algorithm: ROUND_ROBIN
+ loadbalancer: my_lb
+ listener: 123
+ protocol: HTTP
+ admin_state_up: True
+'''
+
+MEMBER_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Create a pool member
+resources:
+ member:
+ type: OS::Octavia::PoolMember
+ properties:
+ pool: 123
+ address: 1.2.3.4
+ protocol_port: 80
+ weight: 1
+ subnet: sub123
+ admin_state_up: True
+'''
+
+MONITOR_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Create a health monitor
+resources:
+ monitor:
+ type: OS::Octavia::HealthMonitor
+ properties:
+ admin_state_up: True
+ delay: 3
+ expected_codes: 200-202
+ http_method: HEAD
+ max_retries: 5
+ pool: 123
+ timeout: 10
+ type: HTTP
+ url_path: /health
+'''
+
+L7POLICY_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Template to test L7Policy Neutron resource
+resources:
+ l7policy:
+ type: OS::Octavia::L7Policy
+ properties:
+ admin_state_up: True
+ name: test_l7policy
+ description: test l7policy resource
+ action: REDIRECT_TO_URL
+ redirect_url: http://www.mirantis.com
+ listener: 123
+ position: 1
+'''
+
+L7RULE_TEMPLATE = '''
+heat_template_version: 2016-04-08
+description: Template to test L7Rule Neutron resource
+resources:
+ l7rule:
+ type: OS::Octavia::L7Rule
+ properties:
+ admin_state_up: True
+ l7policy: 123
+ type: HEADER
+ compare_type: ENDS_WITH
+ key: test_key
+ value: test_value
+ invert: False
+'''
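These constants are complete HOT documents; every test module below parses one into a throwaway stack and pulls out the single resource under test. A condensed sketch of that flow, using only helpers that already appear in the tests that follow:

    import yaml

    from heat.common import template_format
    from heat.tests import utils
    from heat.tests.openstack.octavia import inline_templates

    # Parse the raw template text and build an in-memory stack from it.
    t = template_format.parse(inline_templates.LB_TEMPLATE)
    stack = utils.parse_stack(t)
    lb = stack['lb']

    # Validation tests tweak a copy of the YAML before re-dumping it,
    # e.g. dropping a required property to make validate() fail.
    tmpl = yaml.safe_load(inline_templates.LB_TEMPLATE)
    del tmpl['resources']['lb']['properties']['vip_subnet']
    broken = template_format.parse(yaml.safe_dump(tmpl))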
diff --git a/heat/tests/openstack/octavia/test_health_monitor.py b/heat/tests/openstack/octavia/test_health_monitor.py
new file mode 100644
index 000000000..880473881
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_health_monitor.py
@@ -0,0 +1,149 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from osc_lib import exceptions
+
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import health_monitor
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class HealthMonitorTest(common.HeatTestCase):
+
+ def _create_stack(self, tmpl=inline_templates.MONITOR_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.healthmonitor = self.stack['monitor']
+
+ self.octavia_client = mock.MagicMock()
+ self.healthmonitor.client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.healthmonitor.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+
+ def test_resource_mapping(self):
+ mapping = health_monitor.resource_mapping()
+ self.assertEqual(health_monitor.HealthMonitor,
+ mapping['OS::Octavia::HealthMonitor'])
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.health_monitor_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.health_monitor_create.side_effect = [
+ exceptions.Conflict(409), {'healthmonitor': {'id': '1234'}}
+ ]
+ expected = {
+ 'healthmonitor': {
+ 'admin_state_up': True,
+ 'delay': 3,
+ 'expected_codes': '200-202',
+ 'http_method': 'HEAD',
+ 'max_retries': 5,
+ 'pool_id': '123',
+ 'timeout': 10,
+ 'type': 'HTTP',
+ 'url_path': '/health'
+ }
+ }
+
+ props = self.healthmonitor.handle_create()
+
+ self.assertFalse(self.healthmonitor.check_create_complete(props))
+ self.octavia_client.health_monitor_create.assert_called_with(
+ json=expected)
+ self.assertFalse(self.healthmonitor.check_create_complete(props))
+ self.octavia_client.health_monitor_create.assert_called_with(
+ json=expected)
+ self.assertFalse(self.healthmonitor.check_create_complete(props))
+ self.assertTrue(self.healthmonitor.check_create_complete(props))
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.healthmonitor.resource_id_set('1234')
+
+ self.assertTrue(self.healthmonitor._show_resource())
+
+ self.octavia_client.health_monitor_show.assert_called_with(
+ '1234')
+
+ def test_update(self):
+ self._create_stack()
+ self.healthmonitor.resource_id_set('1234')
+ self.octavia_client.health_monitor_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.health_monitor_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ }
+
+ prop_diff = self.healthmonitor.handle_update(None, None, prop_diff)
+
+ self.assertFalse(self.healthmonitor.check_update_complete(prop_diff))
+ self.assertFalse(self.healthmonitor._update_called)
+ self.octavia_client.health_monitor_set.assert_called_with(
+ '1234', json={'healthmonitor': prop_diff})
+ self.assertFalse(self.healthmonitor.check_update_complete(prop_diff))
+ self.assertTrue(self.healthmonitor._update_called)
+ self.octavia_client.health_monitor_set.assert_called_with(
+ '1234', json={'healthmonitor': prop_diff})
+ self.assertFalse(self.healthmonitor.check_update_complete(prop_diff))
+ self.assertTrue(self.healthmonitor.check_update_complete(prop_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.healthmonitor.resource_id_set('1234')
+ self.octavia_client.health_monitor_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.health_monitor_delete.side_effect = [
+ exceptions.Conflict(409),
+ None]
+
+ self.healthmonitor.handle_delete()
+
+ self.assertFalse(self.healthmonitor.check_delete_complete(None))
+ self.assertFalse(self.healthmonitor._delete_called)
+ self.octavia_client.health_monitor_delete.assert_called_with(
+ '1234')
+ self.assertFalse(self.healthmonitor.check_delete_complete(None))
+ self.assertTrue(self.healthmonitor._delete_called)
+ self.octavia_client.health_monitor_delete.assert_called_with(
+ '1234')
+ self.assertTrue(self.healthmonitor.check_delete_complete(None))
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.healthmonitor.resource_id_set('1234')
+ self.octavia_client.health_monitor_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.healthmonitor.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.healthmonitor.check_delete_complete, None)
+
+ self.octavia_client.health_monitor_delete.assert_called_with(
+ '1234')
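The alternating assertions in test_create walk a small state machine: the first check_create_complete call swallows the 409 (the parent load balancer is still immutable) and retries the POST, the second call creates the monitor, and the remaining calls poll provisioning_status until ACTIVE. A schematic sketch of that loop under those assumptions — illustrative only, not the resource code under test:

    from osc_lib import exceptions

    def check_create_complete(client, state, props):
        # 'state' is a plain dict standing in for the resource instance.
        if state.get('resource_id') is None:
            try:
                res = client.health_monitor_create(json=props)
            except exceptions.Conflict:
                return False      # parent LB busy; retry on the next tick
            state['resource_id'] = res['healthmonitor']['id']
        status = client.health_monitor_show(
            state['resource_id'])['provisioning_status']
        return status == 'ACTIVE'

This consumes the mocks exactly as the test sequences them: one Conflict, one successful create, then three status reads.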
diff --git a/heat/tests/openstack/octavia/test_l7policy.py b/heat/tests/openstack/octavia/test_l7policy.py
new file mode 100644
index 000000000..c66eff043
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_l7policy.py
@@ -0,0 +1,263 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import yaml
+
+from osc_lib import exceptions
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import l7policy
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class L7PolicyTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = l7policy.resource_mapping()
+ self.assertEqual(mapping['OS::Octavia::L7Policy'],
+ l7policy.L7Policy)
+
+ def _create_stack(self, tmpl=inline_templates.L7POLICY_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.l7policy = self.stack['l7policy']
+
+ self.octavia_client = mock.MagicMock()
+ self.l7policy.client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.l7policy.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+
+ def test_validate_reject_action_with_conflicting_props(self):
+ tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+ props = tmpl['resources']['l7policy']['properties']
+ props['action'] = 'REJECT'
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Properties redirect_pool and redirect_url are not '
+ 'required when action type is set to REJECT.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.l7policy.validate)
+
+ def test_validate_redirect_pool_action_with_url(self):
+ tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+ props = tmpl['resources']['l7policy']['properties']
+ props['action'] = 'REDIRECT_TO_POOL'
+ props['redirect_pool'] = '123'
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('redirect_url property should only be specified '
+ 'for action with value REDIRECT_TO_URL.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.ResourcePropertyValueDependency,
+ msg, self.l7policy.validate)
+
+ def test_validate_redirect_pool_action_without_pool(self):
+ tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+ props = tmpl['resources']['l7policy']['properties']
+ props['action'] = 'REDIRECT_TO_POOL'
+ del props['redirect_url']
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Property redirect_pool is required when action type '
+ 'is set to REDIRECT_TO_POOL.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.l7policy.validate)
+
+ def test_validate_redirect_url_action_with_pool(self):
+ tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+ props = tmpl['resources']['l7policy']['properties']
+ props['redirect_pool'] = '123'
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('redirect_pool property should only be specified '
+ 'for action with value REDIRECT_TO_POOL.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.ResourcePropertyValueDependency,
+ msg, self.l7policy.validate)
+
+ def test_validate_redirect_url_action_without_url(self):
+ tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+ props = tmpl['resources']['l7policy']['properties']
+ del props['redirect_url']
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Property redirect_url is required when action type '
+ 'is set to REDIRECT_TO_URL.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.l7policy.validate)
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.l7policy_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+
+ self.octavia_client.l7policy_create.side_effect = [
+ exceptions.Conflict(409),
+ {'l7policy': {'id': '1234'}}
+ ]
+ expected = {
+ 'l7policy': {
+ 'name': u'test_l7policy',
+ 'description': u'test l7policy resource',
+ 'action': u'REDIRECT_TO_URL',
+ 'listener_id': u'123',
+ 'redirect_url': u'http://www.mirantis.com',
+ 'position': 1,
+ 'admin_state_up': True
+ }
+ }
+
+ props = self.l7policy.handle_create()
+
+ self.assertFalse(self.l7policy.check_create_complete(props))
+ self.octavia_client.l7policy_create.assert_called_with(json=expected)
+ self.assertFalse(self.l7policy.check_create_complete(props))
+ self.octavia_client.l7policy_create.assert_called_with(json=expected)
+ self.assertFalse(self.l7policy.check_create_complete(props))
+ self.assertTrue(self.l7policy.check_create_complete(props))
+
+ def test_create_missing_properties(self):
+ for prop in ('action', 'listener'):
+            tmpl = yaml.safe_load(inline_templates.L7POLICY_TEMPLATE)
+            del tmpl['resources']['l7policy']['properties'][prop]
+            self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ self.assertRaises(exception.StackValidationFailed,
+ self.l7policy.validate)
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.l7policy.resource_id_set('1234')
+ self.octavia_client.l7policy_show.return_value = {'id': '1234'}
+
+ self.assertEqual({'id': '1234'}, self.l7policy._show_resource())
+
+ self.octavia_client.l7policy_show.assert_called_with('1234')
+
+ def test_update(self):
+ self._create_stack()
+ self.l7policy.resource_id_set('1234')
+ self.octavia_client.l7policy_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.l7policy_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ 'name': 'your_l7policy',
+ 'redirect_url': 'http://www.google.com'
+ }
+
+ prop_diff = self.l7policy.handle_update(None, None, prop_diff)
+
+ self.assertFalse(self.l7policy.check_update_complete(prop_diff))
+ self.assertFalse(self.l7policy._update_called)
+ self.octavia_client.l7policy_set.assert_called_with(
+ '1234', json={'l7policy': prop_diff})
+ self.assertFalse(self.l7policy.check_update_complete(prop_diff))
+ self.assertTrue(self.l7policy._update_called)
+ self.octavia_client.l7policy_set.assert_called_with(
+ '1234', json={'l7policy': prop_diff})
+ self.assertFalse(self.l7policy.check_update_complete(prop_diff))
+ self.assertTrue(self.l7policy.check_update_complete(prop_diff))
+
+ def test_update_redirect_pool_prop_name(self):
+ self._create_stack()
+ self.l7policy.resource_id_set('1234')
+ self.octavia_client.l7policy_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.l7policy_set.side_effect = [
+ exceptions.Conflict(409), None]
+
+ unresolved_diff = {
+ 'redirect_url': None,
+ 'action': 'REDIRECT_TO_POOL',
+ 'redirect_pool': 'UNRESOLVED_POOL'
+ }
+ resolved_diff = {
+ 'redirect_url': None,
+ 'action': 'REDIRECT_TO_POOL',
+ 'redirect_pool_id': '123'
+ }
+
+ self.l7policy.handle_update(None, None, unresolved_diff)
+
+ self.assertFalse(self.l7policy.check_update_complete(resolved_diff))
+ self.assertFalse(self.l7policy._update_called)
+ self.octavia_client.l7policy_set.assert_called_with(
+ '1234', json={'l7policy': resolved_diff})
+ self.assertFalse(self.l7policy.check_update_complete(resolved_diff))
+ self.assertTrue(self.l7policy._update_called)
+ self.octavia_client.l7policy_set.assert_called_with(
+ '1234', json={'l7policy': resolved_diff})
+ self.assertFalse(self.l7policy.check_update_complete(resolved_diff))
+ self.assertTrue(self.l7policy.check_update_complete(resolved_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.l7policy.resource_id_set('1234')
+ self.octavia_client.l7policy_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.l7policy_delete.side_effect = [
+ exceptions.Conflict(409),
+ None]
+
+ self.l7policy.handle_delete()
+
+ self.assertFalse(self.l7policy.check_delete_complete(None))
+ self.assertFalse(self.l7policy._delete_called)
+ self.octavia_client.l7policy_delete.assert_called_with(
+ '1234')
+ self.assertFalse(self.l7policy.check_delete_complete(None))
+ self.assertTrue(self.l7policy._delete_called)
+ self.octavia_client.l7policy_delete.assert_called_with(
+ '1234')
+ self.assertTrue(self.l7policy.check_delete_complete(None))
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.l7policy.resource_id_set('1234')
+ self.octavia_client.l7policy_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.l7policy.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.l7policy.check_delete_complete, None)
+
+ self.octavia_client.l7policy_delete.assert_called_with(
+ '1234')
diff --git a/heat/tests/openstack/octavia/test_l7rule.py b/heat/tests/openstack/octavia/test_l7rule.py
new file mode 100644
index 000000000..2d46d6fca
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_l7rule.py
@@ -0,0 +1,178 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import yaml
+
+from osc_lib import exceptions
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import l7rule
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class L7RuleTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = l7rule.resource_mapping()
+ self.assertEqual(mapping['OS::Octavia::L7Rule'],
+ l7rule.L7Rule)
+
+ def _create_stack(self, tmpl=inline_templates.L7RULE_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.l7rule = self.stack['l7rule']
+
+ self.octavia_client = mock.MagicMock()
+ self.l7rule.client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.l7rule.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+
+ def test_validate_when_key_required(self):
+ tmpl = yaml.safe_load(inline_templates.L7RULE_TEMPLATE)
+ props = tmpl['resources']['l7rule']['properties']
+ del props['key']
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Property key is missing. This property should be '
+ 'specified for rules of HEADER and COOKIE types.')
+ with mock.patch('heat.engine.clients.os.neutron.NeutronClientPlugin.'
+ 'has_extension', return_value=True):
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.l7rule.validate)
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.l7rule_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.l7rule_create.side_effect = [
+ exceptions.Conflict(409),
+ {'rule': {'id': '1234'}}
+ ]
+ expected = {
+ 'rule': {
+ 'admin_state_up': True,
+ 'invert': False,
+ 'type': u'HEADER',
+ 'compare_type': u'ENDS_WITH',
+ 'key': u'test_key',
+                'value': u'test_value'
+ }
+ }
+
+ props = self.l7rule.handle_create()
+ self.assertFalse(self.l7rule.check_create_complete(props))
+ self.octavia_client.l7rule_create.assert_called_with('123',
+ json=expected)
+ self.assertFalse(self.l7rule.check_create_complete(props))
+ self.octavia_client.l7rule_create.assert_called_with('123',
+ json=expected)
+ self.assertFalse(self.l7rule.check_create_complete(props))
+ self.assertTrue(self.l7rule.check_create_complete(props))
+
+ def test_create_missing_properties(self):
+ for prop in ('l7policy', 'type', 'compare_type', 'value'):
+ tmpl = yaml.safe_load(inline_templates.L7RULE_TEMPLATE)
+ del tmpl['resources']['l7rule']['properties'][prop]
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ self.assertRaises(exception.StackValidationFailed,
+ self.l7rule.validate)
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.l7rule.resource_id_set('1234')
+ self.octavia_client.l7rule_show.return_value = {'id': '1234'}
+
+ self.assertEqual({'id': '1234'}, self.l7rule._show_resource())
+
+ self.octavia_client.l7rule_show.assert_called_with('1234', '123')
+
+ def test_update(self):
+ self._create_stack()
+ self.l7rule.resource_id_set('1234')
+ self.octavia_client.l7rule_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.l7rule_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ 'name': 'your_l7policy',
+ 'redirect_url': 'http://www.google.com'
+ }
+
+ prop_diff = self.l7rule.handle_update(None, None, prop_diff)
+
+ self.assertFalse(self.l7rule.check_update_complete(prop_diff))
+ self.assertFalse(self.l7rule._update_called)
+ self.octavia_client.l7rule_set.assert_called_with(
+ '1234', '123', json={'rule': prop_diff})
+ self.assertFalse(self.l7rule.check_update_complete(prop_diff))
+ self.assertTrue(self.l7rule._update_called)
+ self.octavia_client.l7rule_set.assert_called_with(
+ '1234', '123', json={'rule': prop_diff})
+ self.assertFalse(self.l7rule.check_update_complete(prop_diff))
+ self.assertTrue(self.l7rule.check_update_complete(prop_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.l7rule.resource_id_set('1234')
+ self.octavia_client.l7rule_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.l7rule_delete.side_effect = [
+ exceptions.Conflict(409),
+ None]
+
+ self.l7rule.handle_delete()
+
+ self.assertFalse(self.l7rule.check_delete_complete(None))
+ self.assertFalse(self.l7rule._delete_called)
+ self.assertFalse(self.l7rule.check_delete_complete(None))
+ self.assertTrue(self.l7rule._delete_called)
+ self.octavia_client.l7rule_delete.assert_called_with(
+ '1234', '123')
+ self.assertTrue(self.l7rule.check_delete_complete(None))
+
+ def test_delete_already_gone(self):
+ self._create_stack()
+ self.l7rule.resource_id_set('1234')
+ self.octavia_client.l7rule_delete.side_effect = (
+ exceptions.NotFound(404))
+
+ self.l7rule.handle_delete()
+ self.assertTrue(self.l7rule.check_delete_complete(None))
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.l7rule.resource_id_set('1234')
+ self.octavia_client.l7rule_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.l7rule.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.l7rule.check_delete_complete, None)
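Every client call here rides the parent l7policy id ('123') positionally next to the rule id, and test_delete_already_gone pins down that a 404 on delete counts as success. One scheduler tick of that delete path, in schematic form (an assumed shape, mirroring the assertions only):

    from osc_lib import exceptions

    def delete_tick(client, rule_id, policy_id):
        try:
            client.l7rule_delete(rule_id, policy_id)
        except exceptions.Conflict:
            return False   # policy/listener still immutable; retry later
        except exceptions.NotFound:
            return True    # already gone, so deletion is complete
        return False       # accepted; keep polling provisioning_status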
diff --git a/heat/tests/openstack/octavia/test_listener.py b/heat/tests/openstack/octavia/test_listener.py
new file mode 100644
index 000000000..e4ddc85c7
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_listener.py
@@ -0,0 +1,187 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+import yaml
+
+from osc_lib import exceptions
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import listener
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class ListenerTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = listener.resource_mapping()
+ self.assertEqual(listener.Listener,
+ mapping['OS::Octavia::Listener'])
+
+ def _create_stack(self, tmpl=inline_templates.LISTENER_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.listener = self.stack['listener']
+
+ self.octavia_client = mock.MagicMock()
+ self.listener.client = mock.MagicMock(return_value=self.octavia_client)
+ self.listener.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+
+ def test_validate_terminated_https(self):
+ tmpl = yaml.safe_load(inline_templates.LISTENER_TEMPLATE)
+ props = tmpl['resources']['listener']['properties']
+ props['protocol'] = 'TERMINATED_HTTPS'
+ del props['default_tls_container_ref']
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ self.assertRaises(exception.StackValidationFailed,
+ self.listener.validate)
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.listener_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.listener_create.side_effect = [
+ exceptions.Conflict(409), {'listener': {'id': '1234'}}
+ ]
+ expected = {
+ 'listener': {
+ 'protocol_port': 80,
+ 'protocol': 'TCP',
+ 'loadbalancer_id': '123',
+ 'default_pool_id': 'my_pool',
+ 'name': 'my_listener',
+ 'description': 'my listener',
+ 'admin_state_up': True,
+ 'default_tls_container_ref': 'ref',
+ 'sni_container_refs': ['ref1', 'ref2'],
+ 'connection_limit': -1,
+ 'tenant_id': '1234',
+ }
+ }
+
+ props = self.listener.handle_create()
+
+ self.assertFalse(self.listener.check_create_complete(props))
+ self.octavia_client.listener_create.assert_called_with(json=expected)
+ self.assertFalse(self.listener.check_create_complete(props))
+ self.octavia_client.listener_create.assert_called_with(json=expected)
+ self.assertFalse(self.listener.check_create_complete(props))
+ self.assertTrue(self.listener.check_create_complete(props))
+
+ def test_create_missing_properties(self):
+ for prop in ('protocol', 'protocol_port', 'loadbalancer'):
+ tmpl = yaml.safe_load(inline_templates.LISTENER_TEMPLATE)
+ del tmpl['resources']['listener']['properties'][prop]
+ del tmpl['resources']['listener']['properties']['default_pool']
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+ if prop == 'loadbalancer':
+ self.assertRaises(exception.PropertyUnspecifiedError,
+ self.listener.validate)
+ else:
+ self.assertRaises(exception.StackValidationFailed,
+ self.listener.validate)
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.listener.resource_id_set('1234')
+ self.octavia_client.listener_show.return_value = {'id': '1234'}
+ self.assertEqual({'id': '1234'}, self.listener._show_resource())
+
+ self.octavia_client.listener_show.assert_called_with('1234')
+
+ def test_update(self):
+ self._create_stack()
+ self.listener.resource_id_set('1234')
+ self.octavia_client.listener_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.listener_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ 'name': 'your_listener',
+ }
+
+ prop_diff = self.listener.handle_update(self.listener.t,
+ None, prop_diff)
+
+ self.assertFalse(self.listener.check_update_complete(prop_diff))
+ self.assertFalse(self.listener._update_called)
+ self.octavia_client.listener_set.assert_called_with(
+ '1234', json={'listener': prop_diff})
+ self.assertFalse(self.listener.check_update_complete(prop_diff))
+ self.assertTrue(self.listener._update_called)
+ self.octavia_client.listener_set.assert_called_with(
+ '1234', json={'listener': prop_diff})
+ self.assertFalse(self.listener.check_update_complete(prop_diff))
+ self.assertTrue(self.listener.check_update_complete(prop_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.listener.resource_id_set('1234')
+ self.octavia_client.listener_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.listener_delete.side_effect = [
+ exceptions.Conflict(409), None]
+
+ self.listener.handle_delete()
+
+ self.assertFalse(self.listener.check_delete_complete(None))
+ self.assertFalse(self.listener._delete_called)
+ self.octavia_client.listener_delete.assert_called_with('1234')
+ self.assertFalse(self.listener.check_delete_complete(None))
+ self.assertTrue(self.listener._delete_called)
+ self.octavia_client.listener_delete.assert_called_with('1234')
+ self.assertTrue(self.listener.check_delete_complete(None))
+
+ def test_delete_not_found(self):
+ self._create_stack()
+ self.listener.resource_id_set('1234')
+ self.octavia_client.listener_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ ]
+ self.octavia_client.listener_delete.side_effect = [
+ exceptions.Conflict(409),
+ exceptions.NotFound(404)]
+
+ self.listener.handle_delete()
+
+ self.assertFalse(self.listener.check_delete_complete(None))
+ self.assertFalse(self.listener._delete_called)
+ self.octavia_client.listener_delete.assert_called_with('1234')
+ self.assertTrue(self.listener.check_delete_complete(None))
+ self.octavia_client.listener_delete.assert_called_with('1234')
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.listener.resource_id_set('1234')
+ self.octavia_client.listener_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.listener.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.listener.check_delete_complete, None)
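All of these tests lean on one unittest.mock behavior: a side_effect list is consumed one element per call, with exception instances raised and plain values returned. That is how a single MagicMock scripts the whole Conflict-then-success conversation:

    import unittest.mock as mock

    from osc_lib import exceptions

    client = mock.MagicMock()
    client.listener_delete.side_effect = [exceptions.Conflict(409), None]

    try:
        client.listener_delete('1234')      # first call raises the 409
    except exceptions.Conflict:
        pass
    assert client.listener_delete('1234') is None   # second call succeeds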
diff --git a/heat/tests/openstack/octavia/test_loadbalancer.py b/heat/tests/openstack/octavia/test_loadbalancer.py
new file mode 100644
index 000000000..969ce3b66
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_loadbalancer.py
@@ -0,0 +1,170 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutronclient.neutron import v2_0 as neutronV20
+from osc_lib import exceptions
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import loadbalancer
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class LoadBalancerTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = loadbalancer.resource_mapping()
+ self.assertEqual(loadbalancer.LoadBalancer,
+ mapping['OS::Octavia::LoadBalancer'])
+
+ def _create_stack(self, tmpl=inline_templates.LB_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.lb = self.stack['lb']
+ self.octavia_client = mock.MagicMock()
+ self.lb.client = mock.MagicMock()
+ self.lb.client.return_value = self.octavia_client
+
+ self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
+ return_value='123')
+
+ self.lb.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.lb.translate_properties(self.lb.properties)
+ self.lb.resource_id_set('1234')
+
+ def test_create(self):
+ self._create_stack()
+ expected = {
+ 'loadbalancer': {
+ 'name': 'my_lb',
+ 'description': 'my loadbalancer',
+ 'vip_address': '10.0.0.4',
+ 'vip_subnet_id': '123',
+ 'provider': 'octavia',
+ 'tenant_id': '1234',
+ 'admin_state_up': True,
+ }
+ }
+
+ self.lb.handle_create()
+
+ self.octavia_client.load_balancer_create.assert_called_with(
+ json=expected)
+
+ def test_check_create_complete(self):
+ self._create_stack()
+ self.octavia_client.load_balancer_show.side_effect = [
+ {'provisioning_status': 'ACTIVE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ERROR'},
+ ]
+
+ self.assertTrue(self.lb.check_create_complete(None))
+ self.assertFalse(self.lb.check_create_complete(None))
+ self.assertRaises(exception.ResourceInError,
+ self.lb.check_create_complete, None)
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.octavia_client.load_balancer_show.return_value = {'id': '1234'}
+ self.assertEqual({'id': '1234'}, self.lb._show_resource())
+
+ self.octavia_client.load_balancer_show.assert_called_with('1234')
+
+ def test_update(self):
+ self._create_stack()
+ prop_diff = {
+ 'name': 'lb',
+ 'description': 'a loadbalancer',
+ 'admin_state_up': False,
+ }
+
+ prop_diff = self.lb.handle_update(None, None, prop_diff)
+
+ self.octavia_client.load_balancer_set.assert_called_once_with(
+ '1234', json={'loadbalancer': prop_diff})
+
+ def test_update_complete(self):
+ self._create_stack()
+ prop_diff = {
+ 'name': 'lb',
+ 'description': 'a loadbalancer',
+ 'admin_state_up': False,
+ }
+ self.octavia_client.load_balancer_show.side_effect = [
+ {'provisioning_status': 'ACTIVE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ ]
+
+ self.lb.handle_update(None, None, prop_diff)
+
+ self.assertTrue(self.lb.check_update_complete(prop_diff))
+ self.assertFalse(self.lb.check_update_complete(prop_diff))
+ self.assertTrue(self.lb.check_update_complete({}))
+
+ def test_delete(self):
+ self._create_stack()
+ self.octavia_client.load_balancer_show.side_effect = [
+ {'provisioning_status': 'DELETE_PENDING'},
+ {'provisioning_status': 'DELETE_PENDING'},
+ {'provisioning_status': 'DELETED'},
+ ]
+
+ self.octavia_client.load_balancer_delete.side_effect = [
+ exceptions.Conflict(409),
+ None
+ ]
+
+ self.lb.handle_delete()
+
+ self.assertFalse(self.lb.check_delete_complete(None))
+ self.assertFalse(self.lb._delete_called)
+ self.assertFalse(self.lb.check_delete_complete(None))
+ self.assertTrue(self.lb._delete_called)
+ self.assertTrue(self.lb.check_delete_complete(None))
+ self.octavia_client.load_balancer_delete.assert_called_with('1234')
+ self.assertEqual(
+ 2, self.octavia_client.load_balancer_delete.call_count)
+
+ def test_delete_error(self):
+ self._create_stack()
+ self.octavia_client.load_balancer_show.side_effect = [
+ {'provisioning_status': 'DELETE_PENDING'},
+ ]
+
+ self.octavia_client.load_balancer_delete.side_effect = [
+ exceptions.Conflict(409),
+ exceptions.NotFound(404)
+ ]
+
+ self.lb.handle_delete()
+
+ self.assertFalse(self.lb.check_delete_complete(None))
+ self.assertTrue(self.lb.check_delete_complete(None))
+ self.octavia_client.load_balancer_delete.assert_called_with('1234')
+ self.assertEqual(
+ 2, self.octavia_client.load_balancer_delete.call_count)
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.octavia_client.load_balancer_delete.side_effect = (
+            exceptions.Unauthorized(401))
+
+ self.lb.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.lb.check_delete_complete, None)
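test_check_create_complete pins the polling contract down to three outcomes: ACTIVE means done, a pending status means keep waiting, and ERROR is fatal (the real resource raises exception.ResourceInError). A condensed sketch of that check, with a generic exception standing in for Heat's:

    def check_create_complete(client, lb_id):
        status = client.load_balancer_show(lb_id)['provisioning_status']
        if status == 'ERROR':
            raise RuntimeError('load balancer %s went into ERROR' % lb_id)
        return status == 'ACTIVE'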
diff --git a/heat/tests/openstack/octavia/test_pool.py b/heat/tests/openstack/octavia/test_pool.py
new file mode 100644
index 000000000..f4e74930f
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_pool.py
@@ -0,0 +1,200 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import yaml
+
+from osc_lib import exceptions
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import pool
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class PoolTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = pool.resource_mapping()
+ self.assertEqual(pool.Pool,
+ mapping['OS::Octavia::Pool'])
+
+ def _create_stack(self, tmpl=inline_templates.POOL_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.pool = self.stack['pool']
+
+ self.octavia_client = mock.MagicMock()
+ self.pool.client = mock.MagicMock(return_value=self.octavia_client)
+
+ self.pool.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+
+ def test_validate_no_cookie_name(self):
+ tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
+ sp = tmpl['resources']['pool']['properties']['session_persistence']
+ sp['type'] = 'APP_COOKIE'
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Property cookie_name is required when '
+ 'session_persistence type is set to APP_COOKIE.')
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.pool.validate)
+
+ def test_validate_source_ip_cookie_name(self):
+ tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
+ sp = tmpl['resources']['pool']['properties']['session_persistence']
+ sp['type'] = 'SOURCE_IP'
+ sp['cookie_name'] = 'cookie'
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+
+ msg = _('Property cookie_name must NOT be specified when '
+ 'session_persistence type is set to SOURCE_IP.')
+ self.assertRaisesRegex(exception.StackValidationFailed,
+ msg, self.pool.validate)
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.pool_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.pool_create.side_effect = [
+ exceptions.Conflict(409), {'pool': {'id': '1234'}}
+ ]
+ expected = {
+ 'pool': {
+ 'name': 'my_pool',
+ 'description': 'my pool',
+ 'session_persistence': {
+ 'type': 'HTTP_COOKIE'
+ },
+ 'lb_algorithm': 'ROUND_ROBIN',
+ 'listener_id': '123',
+ 'loadbalancer_id': 'my_lb',
+ 'protocol': 'HTTP',
+ 'admin_state_up': True
+ }
+ }
+
+ props = self.pool.handle_create()
+
+ self.assertFalse(self.pool.check_create_complete(props))
+ self.octavia_client.pool_create.assert_called_with(json=expected)
+ self.assertFalse(self.pool.check_create_complete(props))
+ self.octavia_client.pool_create.assert_called_with(json=expected)
+ self.assertFalse(self.pool.check_create_complete(props))
+ self.assertTrue(self.pool.check_create_complete(props))
+
+ def test_create_missing_properties(self):
+ for prop in ('lb_algorithm', 'listener', 'protocol'):
+ tmpl = yaml.safe_load(inline_templates.POOL_TEMPLATE)
+ del tmpl['resources']['pool']['properties']['loadbalancer']
+ del tmpl['resources']['pool']['properties'][prop]
+ self._create_stack(tmpl=yaml.safe_dump(tmpl))
+ if prop == 'listener':
+ self.assertRaises(exception.PropertyUnspecifiedError,
+ self.pool.validate)
+ else:
+ self.assertRaises(exception.StackValidationFailed,
+ self.pool.validate)
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.pool.resource_id_set('1234')
+ self.octavia_client.pool_show.return_value = {'id': '1234'}
+
+ self.assertEqual(self.pool._show_resource(), {'id': '1234'})
+
+ self.octavia_client.pool_show.assert_called_with('1234')
+
+ def test_update(self):
+ self._create_stack()
+ self.pool.resource_id_set('1234')
+ self.octavia_client.pool_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.pool_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ 'name': 'your_pool',
+ 'lb_algorithm': 'SOURCE_IP'
+ }
+
+ prop_diff = self.pool.handle_update(None, None, prop_diff)
+
+ self.assertFalse(self.pool.check_update_complete(prop_diff))
+ self.assertFalse(self.pool._update_called)
+ self.octavia_client.pool_set.assert_called_with(
+ '1234', json={'pool': prop_diff})
+ self.assertFalse(self.pool.check_update_complete(prop_diff))
+ self.assertTrue(self.pool._update_called)
+ self.octavia_client.pool_set.assert_called_with(
+ '1234', json={'pool': prop_diff})
+ self.assertFalse(self.pool.check_update_complete(prop_diff))
+ self.assertTrue(self.pool.check_update_complete(prop_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.pool.resource_id_set('1234')
+ self.octavia_client.pool_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.pool_delete.side_effect = [
+ exceptions.Conflict(409),
+ None]
+
+ self.pool.handle_delete()
+
+ self.assertFalse(self.pool.check_delete_complete(None))
+ self.assertFalse(self.pool._delete_called)
+ self.assertFalse(self.pool.check_delete_complete(None))
+ self.assertTrue(self.pool._delete_called)
+ self.octavia_client.pool_delete.assert_called_with('1234')
+ self.assertTrue(self.pool.check_delete_complete(None))
+
+ def test_delete_not_found(self):
+ self._create_stack()
+ self.pool.resource_id_set('1234')
+ self.octavia_client.pool_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ ]
+ self.octavia_client.pool_delete.side_effect = [
+ exceptions.Conflict(409),
+ exceptions.NotFound(404)]
+
+ self.pool.handle_delete()
+
+ self.assertFalse(self.pool.check_delete_complete(None))
+ self.assertFalse(self.pool._delete_called)
+ self.octavia_client.pool_delete.assert_called_with('1234')
+ self.assertTrue(self.pool.check_delete_complete(None))
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.pool.resource_id_set('1234')
+ self.octavia_client.pool_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.pool.handle_delete()
+ self.assertRaises(exceptions.Unauthorized,
+ self.pool.check_delete_complete, None)
diff --git a/heat/tests/openstack/octavia/test_pool_member.py b/heat/tests/openstack/octavia/test_pool_member.py
new file mode 100644
index 000000000..93035c18d
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_pool_member.py
@@ -0,0 +1,167 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from neutronclient.neutron import v2_0 as neutronV20
+from osc_lib import exceptions
+
+from heat.common import template_format
+from heat.engine.resources.openstack.octavia import pool_member
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class PoolMemberTest(common.HeatTestCase):
+
+ def test_resource_mapping(self):
+ mapping = pool_member.resource_mapping()
+ self.assertEqual(pool_member.PoolMember,
+ mapping['OS::Octavia::PoolMember'])
+
+ def _create_stack(self, tmpl=inline_templates.MEMBER_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.member = self.stack['member']
+ self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
+ return_value='123')
+
+ self.octavia_client = mock.MagicMock()
+ self.member.client = mock.MagicMock(return_value=self.octavia_client)
+ self.member.client_plugin().get_pool = (
+ mock.MagicMock(return_value='123'))
+ self.member.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.member.translate_properties(self.member.properties)
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.member_show.side_effect = [
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'PENDING_CREATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.member_create.side_effect = [
+ exceptions.Conflict(409), {'member': {'id': '1234'}}]
+ expected = {
+ 'member': {
+ 'address': '1.2.3.4',
+ 'protocol_port': 80,
+ 'weight': 1,
+ 'subnet_id': '123',
+ 'admin_state_up': True,
+ }
+ }
+ props = self.member.handle_create()
+ self.assertFalse(self.member.check_create_complete(props))
+ self.octavia_client.member_create.assert_called_with('123',
+ json=expected)
+ self.assertFalse(self.member.check_create_complete(props))
+ self.octavia_client.member_create.assert_called_with('123',
+ json=expected)
+ self.assertFalse(self.member.check_create_complete(props))
+ self.assertTrue(self.member.check_create_complete(props))
+
+ def test_show_resource(self):
+ self._create_stack()
+ self.member.resource_id_set('1234')
+ self.octavia_client.member_show.return_value = {'id': '1234'}
+
+ self.assertEqual(self.member._show_resource(), {'id': '1234'})
+
+ self.octavia_client.member_show.assert_called_with('123', '1234')
+
+ def test_update(self):
+ self._create_stack()
+ self.member.resource_id_set('1234')
+ self.octavia_client.member_show.side_effect = [
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'PENDING_UPDATE'},
+ {'provisioning_status': 'ACTIVE'},
+ ]
+ self.octavia_client.member_set.side_effect = [
+ exceptions.Conflict(409), None]
+ prop_diff = {
+ 'admin_state_up': False,
+ 'weight': 2,
+ }
+
+ prop_diff = self.member.handle_update(None, None, prop_diff)
+
+ self.assertFalse(self.member.check_update_complete(prop_diff))
+ self.assertFalse(self.member._update_called)
+ self.octavia_client.member_set.assert_called_with(
+ '123', '1234', json={'member': prop_diff})
+ self.assertFalse(self.member.check_update_complete(prop_diff))
+ self.assertTrue(self.member._update_called)
+ self.octavia_client.member_set.assert_called_with(
+ '123', '1234', json={'member': prop_diff})
+ self.assertFalse(self.member.check_update_complete(prop_diff))
+ self.assertTrue(self.member.check_update_complete(prop_diff))
+
+ def test_delete(self):
+ self._create_stack()
+ self.member.resource_id_set('1234')
+ self.octavia_client.member_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'PENDING_DELETE'},
+ {'provisioning_status': 'DELETED'},
+ ]
+ self.octavia_client.member_delete.side_effect = [
+ exceptions.Conflict(409),
+ None]
+
+ self.member.handle_delete()
+
+ self.assertFalse(self.member.check_delete_complete(None))
+ self.assertFalse(self.member._delete_called)
+ self.octavia_client.member_delete.assert_called_with('123',
+ '1234')
+ self.assertFalse(self.member.check_delete_complete(None))
+ self.octavia_client.member_delete.assert_called_with('123',
+ '1234')
+ self.assertTrue(self.member._delete_called)
+ self.assertTrue(self.member.check_delete_complete(None))
+
+ def test_delete_not_found(self):
+ self._create_stack()
+ self.member.resource_id_set('1234')
+ self.octavia_client.member_show.side_effect = [
+ {'provisioning_status': 'PENDING_DELETE'},
+ ]
+ self.octavia_client.member_delete.side_effect = [
+ exceptions.Conflict(409),
+ exceptions.NotFound(404)]
+
+ self.member.handle_delete()
+
+ self.assertFalse(self.member.check_delete_complete(None))
+ self.assertFalse(self.member._delete_called)
+ self.octavia_client.member_delete.assert_called_with('123',
+ '1234')
+ self.assertTrue(self.member.check_delete_complete(None))
+ self.octavia_client.member_delete.assert_called_with('123',
+ '1234')
+ self.assertFalse(self.member._delete_called)
+
+ def test_delete_failed(self):
+ self._create_stack()
+ self.member.resource_id_set('1234')
+ self.octavia_client.member_delete.side_effect = (
+ exceptions.Unauthorized(401))
+
+ self.member.handle_delete()
+
+ self.assertRaises(exceptions.Unauthorized,
+ self.member.check_delete_complete, None)
diff --git a/heat/tests/openstack/sahara/test_cluster.py b/heat/tests/openstack/sahara/test_cluster.py
index a09971254..f12d46f25 100644
--- a/heat/tests/openstack/sahara/test_cluster.py
+++ b/heat/tests/openstack/sahara/test_cluster.py
@@ -47,6 +47,25 @@ resources:
access_level: ro
"""
+# NOTE(jfreud): the resource name contains invalid characters (underscores)
+cluster_stack_template_without_name = """
+heat_template_version: 2013-05-23
+description: Hadoop Cluster by Sahara
+resources:
+ lots_of_underscore_name:
+ type: OS::Sahara::Cluster
+ properties:
+ plugin_name: vanilla
+ hadoop_version: 2.3.0
+ cluster_template_id: some_cluster_template_id
+ default_image_id: some_image
+ key_name: admin
+ neutron_management_network: some_network
+ shares:
+ - id: some_share_id
+ access_level: ro
+"""
+
class FakeCluster(object):
def __init__(self, status='Active'):
@@ -79,10 +98,11 @@ class SaharaClusterTest(common.HeatTestCase):
self.fake_cl = FakeCluster()
self.t = template_format.parse(cluster_stack_template)
+ self.t2 = template_format.parse(cluster_stack_template_without_name)
- def _init_cluster(self, template):
+ def _init_cluster(self, template, name='super-cluster'):
self.stack = utils.parse_stack(template)
- cluster = self.stack['super-cluster']
+ cluster = self.stack[name]
return cluster
def _create_cluster(self, template):
@@ -110,6 +130,14 @@ class SaharaClusterTest(common.HeatTestCase):
**expected_kwargs)
self.cl_mgr.get.assert_called_once_with(self.fake_cl.id)
+ def test_cluster_create_invalid_name(self):
+ cluster = self._init_cluster(self.t2, 'lots_of_underscore_name')
+ self.cl_mgr.create.return_value = self.fake_cl
+ self.cl_mgr.get.return_value = self.fake_cl
+ scheduler.TaskRunner(cluster.create)()
+ name = self.cl_mgr.create.call_args[0][0]
+ self.assertIn('lotsofunderscorename', name)
+
def test_cluster_create_fails(self):
cfg.CONF.set_override('action_retry_limit', 0)
cluster = self._init_cluster(self.t)
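Sahara rejects cluster names containing characters that are invalid in hostnames, so the new test feeds in a resource name full of underscores and asserts that the name passed to clusters.create has them stripped. A hedged sketch of that kind of sanitization (the helper name is hypothetical; Heat derives the name from physical_resource_name() and applies similar filtering):

    import re

    def sanitize_cluster_name(name):
        # Hypothetical helper: keep only hostname-safe characters,
        # dropping underscores and other punctuation.
        return re.sub(r'[^A-Za-z0-9-]', '', name)

    assert 'lotsofunderscorename' in sanitize_cluster_name(
        'lots_of_underscore_name')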
diff --git a/heat/tests/openstack/senlin/test_cluster.py b/heat/tests/openstack/senlin/test_cluster.py
index 9f3467cb5..88656df44 100644
--- a/heat/tests/openstack/senlin/test_cluster.py
+++ b/heat/tests/openstack/senlin/test_cluster.py
@@ -26,7 +26,7 @@ from heat.engine import scheduler
from heat.engine import template
from heat.tests import common
from heat.tests import utils
-from senlinclient.common import exc
+from openstack import exceptions
cluster_stack_template = """
@@ -165,7 +165,7 @@ class SenlinClusterTest(common.HeatTestCase):
def test_cluster_delete_success(self):
cluster = self._create_cluster(self.t)
self.senlin_mock.get_cluster.side_effect = [
- exc.sdkexc.ResourceNotFound('SenlinCluster'),
+ exceptions.ResourceNotFound('SenlinCluster'),
]
scheduler.TaskRunner(cluster.delete)()
self.senlin_mock.delete_cluster.assert_called_once_with(
diff --git a/heat/tests/openstack/senlin/test_node.py b/heat/tests/openstack/senlin/test_node.py
index 7638a0492..3966fb265 100644
--- a/heat/tests/openstack/senlin/test_node.py
+++ b/heat/tests/openstack/senlin/test_node.py
@@ -25,7 +25,7 @@ from heat.engine import scheduler
from heat.engine import template
from heat.tests import common
from heat.tests import utils
-from senlinclient.common import exc
+from openstack import exceptions
node_stack_template = """
@@ -127,7 +127,7 @@ class SenlinNodeTest(common.HeatTestCase):
def test_node_delete_success(self):
node = self._create_node()
self.senlin_mock.get_node.side_effect = [
- exc.sdkexc.ResourceNotFound('SenlinNode'),
+ exceptions.ResourceNotFound('SenlinNode'),
]
scheduler.TaskRunner(node.delete)()
self.senlin_mock.delete_node.assert_called_once_with(
diff --git a/heat/tests/openstack/senlin/test_policy.py b/heat/tests/openstack/senlin/test_policy.py
index f2f8ad768..b8795550b 100644
--- a/heat/tests/openstack/senlin/test_policy.py
+++ b/heat/tests/openstack/senlin/test_policy.py
@@ -15,8 +15,8 @@
import copy
import mock
+from openstack import exceptions
from oslo_config import cfg
-from senlinclient.common import exc
from heat.common import exception
from heat.common import template_format
@@ -134,7 +134,7 @@ class SenlinPolicyTest(common.HeatTestCase):
'action': 'fake_action'}
policy = self._create_policy(self.t)
self.senlin_mock.get_policy.side_effect = [
- exc.sdkexc.ResourceNotFound('SenlinPolicy'),
+ exceptions.ResourceNotFound('SenlinPolicy'),
]
scheduler.TaskRunner(policy.delete)()
self.senlin_mock.cluster_detach_policy.assert_called_once_with(
@@ -145,10 +145,10 @@ class SenlinPolicyTest(common.HeatTestCase):
def test_policy_delete_not_attached(self):
policy = self._create_policy(self.t)
self.senlin_mock.get_policy.side_effect = [
- exc.sdkexc.ResourceNotFound('SenlinPolicy'),
+ exceptions.ResourceNotFound('SenlinPolicy'),
]
self.senlin_mock.cluster_detach_policy.side_effect = [
- exc.sdkexc.HttpException(http_status=400),
+ exceptions.HttpException(http_status=400),
]
scheduler.TaskRunner(policy.delete)()
self.senlin_mock.cluster_detach_policy.assert_called_once_with(
diff --git a/heat/tests/openstack/senlin/test_receiver.py b/heat/tests/openstack/senlin/test_receiver.py
index 8195e82b3..138de4c6b 100644
--- a/heat/tests/openstack/senlin/test_receiver.py
+++ b/heat/tests/openstack/senlin/test_receiver.py
@@ -14,7 +14,7 @@
import mock
-from senlinclient.common import exc
+from openstack import exceptions
from heat.common import template_format
from heat.engine.clients.os import senlin
@@ -106,7 +106,7 @@ class SenlinReceiverTest(common.HeatTestCase):
def test_recv_delete_not_found(self):
self.senlin_mock.delete_receiver.side_effect = [
- exc.sdkexc.ResourceNotFound(http_status=404)
+ exceptions.ResourceNotFound(http_status=404)
]
recv = self._create_recv(self.t)
scheduler.TaskRunner(recv.delete)()
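The four senlin test modules in this change swap senlinclient.common.exc (whose exc.sdkexc attribute re-exported SDK exceptions) for openstack.exceptions, since the senlin client is now backed directly by openstacksdk. The consuming pattern stays the same; a small sketch, with 'client' standing in for the mocked senlin client:

    from openstack import exceptions

    def cluster_is_gone(client, cluster_id):
        # A missing cluster means the delete already finished; any
        # other SDK error should propagate to the caller.
        try:
            client.get_cluster(cluster_id)
        except exceptions.ResourceNotFound:
            return True
        return False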
diff --git a/heat/tests/openstack/trove/test_cluster.py b/heat/tests/openstack/trove/test_cluster.py
index 8e459bebf..1480bab28 100644
--- a/heat/tests/openstack/trove/test_cluster.py
+++ b/heat/tests/openstack/trove/test_cluster.py
@@ -18,6 +18,7 @@ from troveclient import exceptions as troveexc
from heat.common import exception
from heat.common import template_format
+from heat.engine.clients.os import neutron
from heat.engine.clients.os import trove
from heat.engine.resources.openstack.trove import cluster
from heat.engine import scheduler
@@ -38,10 +39,16 @@ resources:
instances:
- flavor: m1.heat
volume_size: 1
+ networks:
+ - port: port1
- flavor: m1.heat
volume_size: 1
+ networks:
+ - port: port2
- flavor: m1.heat
volume_size: 1
+ networks:
+ - port: port3
'''
@@ -86,6 +93,9 @@ class TroveClusterTest(common.HeatTestCase):
self.client = mock_client.return_value
self.troveclient = mock.Mock()
self.troveclient.flavors.get.return_value = FakeFlavor(1, 'm1.heat')
+ self.patchobject(neutron.NeutronClientPlugin,
+ 'find_resourceid_by_name_or_id',
+ return_value='someportid')
self.troveclient.datastore_versions.list.return_value = [FakeVersion()]
self.patchobject(trove.TroveClientPlugin, 'client',
return_value=self.troveclient)
@@ -106,9 +116,12 @@ class TroveClusterTest(common.HeatTestCase):
expected_state = (tc.CREATE, tc.COMPLETE)
self.assertEqual(expected_state, tc.state)
args = self.client.clusters.create.call_args[1]
- self.assertEqual([{'flavorRef': 1, 'volume': {'size': 1}},
- {'flavorRef': 1, 'volume': {'size': 1}},
- {'flavorRef': 1, 'volume': {'size': 1}}],
+ self.assertEqual([{'flavorRef': '1', 'volume': {'size': 1},
+ 'nics': [{'port-id': 'someportid'}]},
+ {'flavorRef': '1', 'volume': {'size': 1},
+ 'nics': [{'port-id': 'someportid'}]},
+ {'flavorRef': '1', 'volume': {'size': 1},
+ 'nics': [{'port-id': 'someportid'}]}],
args['instances'])
self.assertEqual('mongodb', args['datastore'])
self.assertEqual('2.6.1', args['datastore_version'])
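The trove cluster resource now resolves each instance's port reference through the Neutron client plugin (pinned to 'someportid' in setUp) and forwards Nova-style nics entries to clusters.create. A sketch of the mapping this test asserts (build_nics is a hypothetical helper, not Heat's actual method name):

    def build_nics(networks, find_port_id):
        # Translate the 'networks' property into the 'nics' payload;
        # find_port_id stands in for Neutron name-or-id resolution.
        nics = []
        for net in networks or []:
            if net.get('port'):
                nics.append({'port-id': find_port_id(net['port'])})
        return nics

    assert build_nics([{'port': 'port1'}], lambda p: 'someportid') == \
        [{'port-id': 'someportid'}]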
diff --git a/heat/tests/openstack/zun/test_container.py b/heat/tests/openstack/zun/test_container.py
index d70c97c1c..f4be7b3d3 100644
--- a/heat/tests/openstack/zun/test_container.py
+++ b/heat/tests/openstack/zun/test_container.py
@@ -16,6 +16,7 @@ import mock
import six
from oslo_config import cfg
+from zunclient import exceptions as zc_exc
from heat.common import exception
from heat.common import template_format
@@ -46,6 +47,16 @@ resources:
restart_policy: on-failure:2
interactive: false
image_driver: docker
+ hints:
+ hintkey: hintval
+ hostname: myhost
+ security_groups:
+ - my_seg
+ mounts:
+ - volume_size: 1
+ mount_path: /data
+ - volume_id: 6ec29ba3-bf2c-4276-a88e-3670ea5abc80
+ mount_path: /data2
'''
@@ -68,6 +79,18 @@ class ZunContainerTest(common.HeatTestCase):
'Name': 'on-failure'}
self.fake_interactive = False
self.fake_image_driver = 'docker'
+ self.fake_hints = {'hintkey': 'hintval'}
+ self.fake_hostname = 'myhost'
+ self.fake_security_groups = ['my_seg']
+ self.fake_mounts = [
+ {'volume_id': None, 'volume_size': 1, 'mount_path': '/data'},
+ {'volume_id': '6ec29ba3-bf2c-4276-a88e-3670ea5abc80',
+ 'volume_size': None, 'mount_path': '/data2'}]
+ self.fake_mounts_args = [
+ {'size': 1, 'destination': '/data'},
+ {'source': '6ec29ba3-bf2c-4276-a88e-3670ea5abc80',
+ 'destination': '/data2'}]
+
self.fake_network_id = '9c11d847-99ce-4a83-82da-9827362a68e8'
self.fake_network_name = 'private'
self.fake_networks = {
@@ -101,6 +124,7 @@ class ZunContainerTest(common.HeatTestCase):
self.neutron_client = mock.Mock()
self.patchobject(container.Container, 'neutron',
return_value=self.neutron_client)
+ self.stub_VolumeConstraint_validate()
def _mock_get_client(self):
value = mock.MagicMock()
@@ -116,6 +140,9 @@ class ZunContainerTest(common.HeatTestCase):
value.restart_policy = self.fake_restart_policy
value.interactive = self.fake_interactive
value.image_driver = self.fake_image_driver
+ value.hints = self.fake_hints
+ value.hostname = self.fake_hostname
+ value.security_groups = self.fake_security_groups
value.addresses = self.fake_addresses
value.to_dict.return_value = value.__dict__
@@ -169,6 +196,18 @@ class ZunContainerTest(common.HeatTestCase):
self.assertEqual(
self.fake_image_driver,
c.properties.get(container.Container.IMAGE_DRIVER))
+ self.assertEqual(
+ self.fake_hints,
+ c.properties.get(container.Container.HINTS))
+ self.assertEqual(
+ self.fake_hostname,
+ c.properties.get(container.Container.HOSTNAME))
+ self.assertEqual(
+ self.fake_security_groups,
+ c.properties.get(container.Container.SECURITY_GROUPS))
+ self.assertEqual(
+ self.fake_mounts,
+ c.properties.get(container.Container.MOUNTS))
scheduler.TaskRunner(c.create)()
self.assertEqual(self.resource_id, c.resource_id)
@@ -186,7 +225,11 @@ class ZunContainerTest(common.HeatTestCase):
image_pull_policy=self.fake_image_policy,
restart_policy=self.fake_restart_policy,
interactive=self.fake_interactive,
- image_driver=self.fake_image_driver
+ image_driver=self.fake_image_driver,
+ hints=self.fake_hints,
+ hostname=self.fake_hostname,
+ security_groups=self.fake_security_groups,
+ mounts=self.fake_mounts_args,
)
def test_container_create_failed(self):
@@ -229,9 +272,12 @@ class ZunContainerTest(common.HeatTestCase):
def test_container_delete(self):
c = self._create_resource('container', self.rsrc_defn, self.stack)
scheduler.TaskRunner(c.create)()
+ self.patchobject(self.client.containers, 'get',
+ side_effect=[c, zc_exc.NotFound('Not Found')])
scheduler.TaskRunner(c.delete)()
self.assertEqual((c.DELETE, c.COMPLETE), c.state)
- self.assertEqual(1, self.client.containers.delete.call_count)
+ self.client.containers.delete.assert_called_once_with(
+ c.resource_id, stop=True)
def test_container_delete_not_found(self):
c = self._create_resource('container', self.rsrc_defn, self.stack)
@@ -240,7 +286,8 @@ class ZunContainerTest(common.HeatTestCase):
self.client.containers.delete.side_effect = Exception('Not Found')
scheduler.TaskRunner(c.delete)()
self.assertEqual((c.DELETE, c.COMPLETE), c.state)
- self.assertEqual(1, self.client.containers.delete.call_count)
+ self.client.containers.delete.assert_called_once_with(
+ c.resource_id, stop=True)
mock_ignore_not_found = c.client_plugin.return_value.ignore_not_found
self.assertEqual(1, mock_ignore_not_found.call_count)
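Container deletion now passes stop=True and is only considered complete once the container can no longer be fetched; the test simulates that with a containers.get side effect of [c, NotFound]. A sketch of the completion check, assuming a client shaped like the mock:

    from zunclient import exceptions as zc_exc

    def check_delete_complete(client, container_id):
        # Complete once the container has disappeared; until then it
        # is still stopping and a later poll will check again.
        try:
            client.containers.get(container_id)
        except zc_exc.NotFound:
            return True
        return False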
diff --git a/heat/tests/policy/deny_stack_user.json b/heat/tests/policy/deny_stack_user.json
index 6c0fec87f..c20d2673f 100644
--- a/heat/tests/policy/deny_stack_user.json
+++ b/heat/tests/policy/deny_stack_user.json
@@ -12,16 +12,4 @@
"cloudformation:DescribeStackResource": "",
"cloudformation:DescribeStackResources": "rule:deny_stack_user",
"cloudformation:ListStackResources": "rule:deny_stack_user",
-
- "cloudwatch:DeleteAlarms": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarms": "rule:deny_stack_user",
- "cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user",
- "cloudwatch:DisableAlarmActions": "rule:deny_stack_user",
- "cloudwatch:EnableAlarmActions": "rule:deny_stack_user",
- "cloudwatch:GetMetricStatistics": "rule:deny_stack_user",
- "cloudwatch:ListMetrics": "rule:deny_stack_user",
- "cloudwatch:PutMetricAlarm": "rule:deny_stack_user",
- "cloudwatch:PutMetricData": "",
- "cloudwatch:SetAlarmState": "rule:deny_stack_user"
}
diff --git a/heat/tests/policy/resources.json b/heat/tests/policy/resources.json
index 566dac346..163fdb66e 100644
--- a/heat/tests/policy/resources.json
+++ b/heat/tests/policy/resources.json
@@ -1,7 +1,7 @@
{
"context_is_admin": "role:admin",
- "resource_types:OS::Test::AdminOnly": "rule:context_is_admin",
+ "resource_types:OS::Cinder::Quota": "!",
"resource_types:OS::Keystone::*": "rule:context_is_admin"
}
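In oslo.policy, "!" is the never-matching rule, so this registry entry forbids OS::Cinder::Quota for every caller, admins included; the new test_resource_enforce_override_deny_admin below depends on exactly that. A standalone sketch of the semantics (plain oslo.policy, not Heat's ResourceEnforcer wrapper):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.set_rules(policy.Rules.from_dict(
        {'resource_types:OS::Cinder::Quota': '!'}))

    # '!' never matches, so even an admin context is denied.
    assert not enforcer.enforce('resource_types:OS::Cinder::Quota',
                                {}, {'roles': ['admin']})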
diff --git a/heat/tests/test_common_policy.py b/heat/tests/test_common_policy.py
index 672d0b963..ee311a809 100644
--- a/heat/tests/test_common_policy.py
+++ b/heat/tests/test_common_policy.py
@@ -43,20 +43,17 @@ class TestPolicyEnforcer(common.HeatTestCase):
super(TestPolicyEnforcer, self).setUp(mock_resource_policy=False)
self.fixture = self.useFixture(config_fixture.Config())
self.fixture.conf(args=['--config-dir', policy_path])
- self.addCleanup(self.m.VerifyAll)
def get_policy_file(self, filename):
return policy_path + filename
def test_policy_cfn_default(self):
- enforcer = policy.Enforcer(
- scope='cloudformation',
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer(scope='cloudformation')
ctx = utils.dummy_context(roles=[])
for action in self.cfn_actions:
# Everything should be allowed
- enforcer.enforce(ctx, action)
+ enforcer.enforce(ctx, action, is_registered_policy=True)
def test_policy_cfn_notallowed(self):
enforcer = policy.Enforcer(
@@ -67,100 +64,70 @@ class TestPolicyEnforcer(common.HeatTestCase):
for action in self.cfn_actions:
# Everything should raise the default exception.Forbidden
self.assertRaises(exception.Forbidden, enforcer.enforce, ctx,
- action, {})
+ action, {}, is_registered_policy=True)
def test_policy_cfn_deny_stack_user(self):
- enforcer = policy.Enforcer(
- scope='cloudformation',
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer(scope='cloudformation')
ctx = utils.dummy_context(roles=['heat_stack_user'])
for action in self.cfn_actions:
# Everything apart from DescribeStackResource should be Forbidden
if action == "DescribeStackResource":
- enforcer.enforce(ctx, action)
+ enforcer.enforce(ctx, action, is_registered_policy=True)
else:
self.assertRaises(exception.Forbidden, enforcer.enforce, ctx,
- action, {})
+ action, {}, is_registered_policy=True)
def test_policy_cfn_allow_non_stack_user(self):
- enforcer = policy.Enforcer(
- scope='cloudformation',
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer(scope='cloudformation')
ctx = utils.dummy_context(roles=['not_a_stack_user'])
for action in self.cfn_actions:
# Everything should be allowed
- enforcer.enforce(ctx, action)
-
- def test_policy_cw_deny_stack_user(self):
- enforcer = policy.Enforcer(
- scope='cloudwatch',
- policy_file=self.get_policy_file('deny_stack_user.json'))
-
- ctx = utils.dummy_context(roles=['heat_stack_user'])
- for action in self.cw_actions:
- # Everything apart from PutMetricData should be Forbidden
- if action == "PutMetricData":
- enforcer.enforce(ctx, action)
- else:
- self.assertRaises(exception.Forbidden, enforcer.enforce, ctx,
- action, {})
-
- def test_policy_cw_allow_non_stack_user(self):
- enforcer = policy.Enforcer(
- scope='cloudwatch',
- policy_file=self.get_policy_file('deny_stack_user.json'))
-
- ctx = utils.dummy_context(roles=['not_a_stack_user'])
- for action in self.cw_actions:
- # Everything should be allowed
- enforcer.enforce(ctx, action)
+ enforcer.enforce(ctx, action, is_registered_policy=True)
def test_set_rules_overwrite_true(self):
- enforcer = policy.Enforcer(
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer()
enforcer.load_rules(True)
enforcer.set_rules({'test_heat_rule': 1}, True)
self.assertEqual({'test_heat_rule': 1}, enforcer.enforcer.rules)
def test_set_rules_overwrite_false(self):
- enforcer = policy.Enforcer(
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer()
enforcer.load_rules(True)
enforcer.load_rules(True)
enforcer.set_rules({'test_heat_rule': 1}, False)
self.assertIn('test_heat_rule', enforcer.enforcer.rules)
def test_load_rules_force_reload_true(self):
- enforcer = policy.Enforcer(
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer()
enforcer.load_rules(True)
enforcer.set_rules({'test_heat_rule': 'test'})
enforcer.load_rules(True)
self.assertNotIn({'test_heat_rule': 'test'}, enforcer.enforcer.rules)
def test_load_rules_force_reload_false(self):
- enforcer = policy.Enforcer(
- policy_file=self.get_policy_file('deny_stack_user.json'))
+ enforcer = policy.Enforcer()
enforcer.load_rules(True)
enforcer.load_rules(True)
enforcer.set_rules({'test_heat_rule': 'test'})
enforcer.load_rules(False)
self.assertIn('test_heat_rule', enforcer.enforcer.rules)
- def test_default_rule(self):
+ def test_no_such_action(self):
ctx = utils.dummy_context(roles=['not_a_stack_user'])
- enforcer = policy.Enforcer(
- scope='cloudformation',
- policy_file=self.get_policy_file('deny_stack_user.json'),
- exc=None, default_rule='!')
+ enforcer = policy.Enforcer(scope='cloudformation')
action = 'no_such_action'
- self.assertFalse(enforcer.enforce(ctx, action))
+ msg = 'cloudformation:no_such_action has not been registered'
+ self.assertRaisesRegex(base_policy.PolicyNotRegistered,
+ msg,
+ enforcer.enforce,
+ ctx, action,
+ None, None,
+ True)
def test_check_admin(self):
- enforcer = policy.Enforcer(
- policy_file=self.get_policy_file('check_admin.json'))
+ enforcer = policy.Enforcer()
ctx = utils.dummy_context(roles=[])
self.assertFalse(enforcer.check_is_admin(ctx))
@@ -174,64 +141,74 @@ class TestPolicyEnforcer(common.HeatTestCase):
def test_enforce_creds(self):
enforcer = policy.Enforcer()
ctx = utils.dummy_context(roles=['admin'])
- self.m.StubOutWithMock(base_policy.Enforcer, 'enforce')
- base_policy.Enforcer.enforce('context_is_admin', {},
- ctx.to_policy_values(),
- False, exc=None).AndReturn(True)
- self.m.ReplayAll()
self.assertTrue(enforcer.check_is_admin(ctx))
def test_resource_default_rule(self):
context = utils.dummy_context(roles=['non-admin'])
- enforcer = policy.ResourceEnforcer(
- policy_file=self.get_policy_file('resources.json'))
+ enforcer = policy.ResourceEnforcer()
res_type = "OS::Test::NotInPolicy"
- self.assertTrue(enforcer.enforce(context, res_type))
+ self.assertTrue(enforcer.enforce(context, res_type,
+ is_registered_policy=True))
def test_resource_enforce_success(self):
context = utils.dummy_context(roles=['admin'])
- enforcer = policy.ResourceEnforcer(
- policy_file=self.get_policy_file('resources.json'))
- res_type = "OS::Test::AdminOnly"
- self.assertTrue(enforcer.enforce(context, res_type))
+ enforcer = policy.ResourceEnforcer()
+ res_type = "OS::Keystone::User"
+ self.assertTrue(enforcer.enforce(context, res_type,
+ is_registered_policy=True))
def test_resource_enforce_fail(self):
context = utils.dummy_context(roles=['non-admin'])
- enforcer = policy.ResourceEnforcer(
- policy_file=self.get_policy_file('resources.json'))
- res_type = "OS::Test::AdminOnly"
+ enforcer = policy.ResourceEnforcer()
+ res_type = "OS::Nova::Quota"
ex = self.assertRaises(exception.Forbidden,
enforcer.enforce,
- context, res_type)
+ context, res_type,
+ None, None,
+ True)
self.assertIn(res_type, ex.message)
def test_resource_wildcard_enforce_fail(self):
context = utils.dummy_context(roles=['non-admin'])
- enforcer = policy.ResourceEnforcer(
- policy_file=self.get_policy_file('resources.json'))
+ enforcer = policy.ResourceEnforcer()
res_type = "OS::Keystone::User"
ex = self.assertRaises(exception.Forbidden,
enforcer.enforce,
- context, res_type)
+ context, res_type,
+ None, None,
+ True)
+
self.assertIn(res_type.split("::", 1)[0], ex.message)
def test_resource_enforce_returns_false(self):
context = utils.dummy_context(roles=['non-admin'])
- enforcer = policy.ResourceEnforcer(
- policy_file=self.get_policy_file('resources.json'),
- exc=None)
- res_type = "OS::Test::AdminOnly"
- self.assertFalse(enforcer.enforce(context, res_type))
- self.assertIsNotNone(enforcer.enforce(context, res_type))
+ enforcer = policy.ResourceEnforcer(exc=None)
+ res_type = "OS::Keystone::User"
+ self.assertFalse(enforcer.enforce(context, res_type,
+ is_registered_policy=True))
+ self.assertIsNotNone(enforcer.enforce(context, res_type,
+ is_registered_policy=True))
def test_resource_enforce_exc_on_false(self):
context = utils.dummy_context(roles=['non-admin'])
+ enforcer = policy.ResourceEnforcer()
+ res_type = "OS::Keystone::User"
+ ex = self.assertRaises(exception.Forbidden,
+ enforcer.enforce,
+ context, res_type,
+ None, None,
+ True)
+
+ self.assertIn(res_type, ex.message)
+
+ def test_resource_enforce_override_deny_admin(self):
+ context = utils.dummy_context(roles=['admin'])
enforcer = policy.ResourceEnforcer(
policy_file=self.get_policy_file('resources.json'))
- res_type = "OS::Test::AdminOnly"
- self.patchobject(base_policy.Enforcer, 'enforce',
- return_value=False)
+ res_type = "OS::Cinder::Quota"
ex = self.assertRaises(exception.Forbidden,
enforcer.enforce,
- context, res_type)
+ context, res_type,
+ None, None,
+ True)
self.assertIn(res_type, ex.message)
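These tests now rely on the policy defaults registered in code rather than on per-test JSON files: passing is_registered_policy=True routes enforcement through oslo.policy's registered defaults, and querying an action that was never registered raises PolicyNotRegistered instead of silently applying a default rule. A stripped-down illustration with plain oslo.policy:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault(
        'cloudformation:DescribeStackResource', ''))  # '' allows everyone

    # Registered action: checked against its in-code default.
    assert enforcer.authorize('cloudformation:DescribeStackResource',
                              {}, {'roles': []})

    # Unregistered action: authorize() refuses to guess.
    try:
        enforcer.authorize('cloudformation:no_such_action',
                           {}, {'roles': []})
    except policy.PolicyNotRegistered:
        pass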
diff --git a/heat/tests/test_convg_stack.py b/heat/tests/test_convg_stack.py
index afb3bef5e..191a7b3e8 100644
--- a/heat/tests/test_convg_stack.py
+++ b/heat/tests/test_convg_stack.py
@@ -65,9 +65,9 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertTrue(stack_db.convergence)
self.assertEqual({'edges': [[[1, True], None]]}, stack_db.current_deps)
- leaves = stack.convergence_dependencies.leaves()
+ leaves = set(stack.convergence_dependencies.leaves())
expected_calls = []
- for rsrc_id, is_update in leaves:
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
@@ -121,9 +121,9 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertIsNotNone(sync_point)
self.assertEqual(stack_db.id, sync_point.stack_id)
- leaves = stack.convergence_dependencies.leaves()
+ leaves = set(stack.convergence_dependencies.leaves())
expected_calls = []
- for rsrc_id, is_update in leaves:
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
@@ -261,17 +261,17 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertIsNotNone(sync_point)
self.assertEqual(stack_db.id, sync_point.stack_id)
- leaves = stack.convergence_dependencies.leaves()
+ leaves = set(stack.convergence_dependencies.leaves())
expected_calls = []
- for rsrc_id, is_update in leaves:
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None, False))
- leaves = curr_stack.convergence_dependencies.leaves()
- for rsrc_id, is_update in leaves:
+ leaves = set(curr_stack.convergence_dependencies.leaves())
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,
@@ -344,17 +344,17 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertIsNotNone(sync_point, 'entity %s' % entity_id)
self.assertEqual(stack_db.id, sync_point.stack_id)
- leaves = stack.convergence_dependencies.leaves()
+ leaves = set(stack.convergence_dependencies.leaves())
expected_calls = []
- for rsrc_id, is_update in leaves:
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
stack.context, rsrc_id, stack.current_traversal,
{'input_data': {}},
is_update, None, False))
- leaves = curr_stack.convergence_dependencies.leaves()
- for rsrc_id, is_update in leaves:
+ leaves = set(curr_stack.convergence_dependencies.leaves())
+ for rsrc_id, is_update in sorted(leaves, key=lambda n: n.is_update):
expected_calls.append(
mock.call.worker_client.WorkerClient.check_resource(
curr_stack.context, rsrc_id, curr_stack.current_traversal,
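convergence_dependencies.leaves() yields graph nodes in no guaranteed order, so the tests now collect them into a set and sort on each node's is_update flag before building the expected mock calls, making the assertions deterministic. The nodes behave like (rsrc_id, is_update) tuples; a tiny illustration:

    import collections

    Node = collections.namedtuple('Node', ['rsrc_id', 'is_update'])

    leaves = {Node(5, True), Node(3, False)}
    # Sorting on the boolean flag puts cleanup nodes (False) before
    # update nodes (True), independent of set iteration order.
    ordered = sorted(leaves, key=lambda n: n.is_update)
    assert [n.is_update for n in ordered] == [False, True]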
diff --git a/heat/tests/test_engine_service.py b/heat/tests/test_engine_service.py
index 743e97e05..854979c27 100644
--- a/heat/tests/test_engine_service.py
+++ b/heat/tests/test_engine_service.py
@@ -1328,7 +1328,7 @@ class StackServiceTest(common.HeatTestCase):
self.eng.reset_stack_status()
- mock_admin_context.assert_called_once_with()
+ mock_admin_context.assert_called()
filters = {
'status': parser.Stack.IN_PROGRESS,
'convergence': False
diff --git a/heat/tests/test_engine_service_stack_watch.py b/heat/tests/test_engine_service_stack_watch.py
deleted file mode 100644
index 3ff191609..000000000
--- a/heat/tests/test_engine_service_stack_watch.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from heat.engine import service_stack_watch
-from heat.rpc import api as rpc_api
-from heat.tests import common
-from heat.tests import utils
-
-
-class StackServiceWatcherTest(common.HeatTestCase):
-
- def setUp(self):
- super(StackServiceWatcherTest, self).setUp()
- self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant')
-
- @mock.patch.object(service_stack_watch.stack_object.Stack,
- 'get_all_by_owner_id')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'get_all_by_stack')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'update_by_id')
- def test_periodic_watch_task_not_created(self, watch_rule_update,
- watch_rule_get_all_by_stack,
- stack_get_all_by_owner_id):
- """Test case for not creating periodic task for cloud watch lite alarm.
-
- If there is no cloud watch lite alarm, then don't create a periodic
- task for it.
- """
- stack_id = 83
- watch_rule_get_all_by_stack.return_value = []
- stack_get_all_by_owner_id.return_value = []
- tg = mock.Mock()
- sw = service_stack_watch.StackWatch(tg)
- sw.start_watch_task(stack_id, self.ctx)
-
- # assert that add_timer is NOT called.
- self.assertEqual([], tg.add_timer.call_args_list)
-
- @mock.patch.object(service_stack_watch.stack_object.Stack,
- 'get_all_by_owner_id')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'get_all_by_stack')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'update_by_id')
- def test_periodic_watch_task_created(self, watch_rule_update,
- watch_rule_get_all_by_stack,
- stack_get_all_by_owner_id):
- """Test case for creating periodic task for cloud watch lite alarm.
-
- If there is no cloud watch lite alarm, then DO create a periodic task
- for it.
- """
- stack_id = 86
- wr1 = mock.Mock()
- wr1.id = 4
- wr1.state = rpc_api.WATCH_STATE_NODATA
-
- watch_rule_get_all_by_stack.return_value = [wr1]
- stack_get_all_by_owner_id.return_value = []
- tg = mock.Mock()
- sw = service_stack_watch.StackWatch(tg)
- sw.start_watch_task(stack_id, self.ctx)
-
- # assert that add_timer IS called.
- self.assertEqual([mock.call(stack_id, sw.periodic_watcher_task,
- sid=stack_id)],
- tg.add_timer.call_args_list)
-
- @mock.patch.object(service_stack_watch.stack_object.Stack,
- 'get_all_by_owner_id')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'get_all_by_stack')
- @mock.patch.object(service_stack_watch.watch_rule_object.WatchRule,
- 'update_by_id')
- def test_periodic_watch_task_created_nested(self, watch_rule_update,
- watch_rule_get_all_by_stack,
- stack_get_all_by_owner_id):
- stack_id = 90
-
- def my_wr_get(cnxt, sid):
- if sid == stack_id:
- return []
- wr1 = mock.Mock()
- wr1.id = 4
- wr1.state = rpc_api.WATCH_STATE_NODATA
- return [wr1]
-
- watch_rule_get_all_by_stack.side_effect = my_wr_get
-
- def my_nested_get(cnxt, sid):
- if sid == stack_id:
- nested_stack = mock.Mock()
- nested_stack.id = 55
- return [nested_stack]
- return []
-
- stack_get_all_by_owner_id.side_effect = my_nested_get
- tg = mock.Mock()
- sw = service_stack_watch.StackWatch(tg)
- sw.start_watch_task(stack_id, self.ctx)
-
- # assert that add_timer IS called.
- self.assertEqual([mock.call(stack_id, sw.periodic_watcher_task,
- sid=stack_id)],
- tg.add_timer.call_args_list)
diff --git a/heat/tests/test_environment.py b/heat/tests/test_environment.py
index 46efdbe94..fc96afb86 100644
--- a/heat/tests/test_environment.py
+++ b/heat/tests/test_environment.py
@@ -844,7 +844,7 @@ class ResourceRegistryTest(common.HeatTestCase):
def test_list_type_with_invalid_type_name(self):
registry = resources.global_env().registry
- types = registry.get_types(type_name="r'[^\+]'")
+ types = registry.get_types(type_name="r'[^\\+]'")
self.assertEqual([], types)
def test_list_type_with_version(self):
diff --git a/heat/tests/test_fault_middleware.py b/heat/tests/test_fault_middleware.py
index 3292bb668..37b0316af 100644
--- a/heat/tests/test_fault_middleware.py
+++ b/heat/tests/test_fault_middleware.py
@@ -174,7 +174,7 @@ class FaultMiddlewareTest(common.HeatTestCase):
if hasattr(obj, 'msg_fmt'):
kwargs = {}
- spec_names = re.findall('%\((\w+)\)([cdeEfFgGinorsxX])',
+ spec_names = re.findall(r'%\((\w+)\)([cdeEfFgGinorsxX])',
obj.msg_fmt)
for key, convtype in spec_names:
diff --git a/heat/tests/test_function.py b/heat/tests/test_function.py
index 3188937b6..2073ede4b 100644
--- a/heat/tests/test_function.py
+++ b/heat/tests/test_function.py
@@ -180,7 +180,7 @@ class ValidateTest(common.HeatTestCase):
self.assertIsNone(function.validate(self.func))
self.func = TestFunction(None, 'foo', ['bar'])
self.assertRaisesRegex(exception.StackValidationFailed,
- '.foo: Need more arguments',
+ 'foo: Need more arguments',
function.validate, self.func)
def test_validate_dict(self):
@@ -190,7 +190,7 @@ class ValidateTest(common.HeatTestCase):
self.func = TestFunction(None, 'foo', ['bar'])
snippet = {'foo': 'bar', 'blarg': self.func}
self.assertRaisesRegex(exception.StackValidationFailed,
- '.blarg.foo: Need more arguments',
+ 'blarg.foo: Need more arguments',
function.validate, snippet)
def test_validate_list(self):
@@ -200,7 +200,7 @@ class ValidateTest(common.HeatTestCase):
self.func = TestFunction(None, 'foo', ['bar'])
snippet = {'foo': 'bar', 'blarg': self.func}
self.assertRaisesRegex(exception.StackValidationFailed,
- '.blarg.foo: Need more arguments',
+ 'blarg.foo: Need more arguments',
function.validate, snippet)
def test_validate_all(self):
@@ -210,7 +210,7 @@ class ValidateTest(common.HeatTestCase):
self.func = TestFunction(None, 'foo', ['bar'])
snippet = {'foo': 'bar', 'blarg': self.func}
self.assertRaisesRegex(exception.StackValidationFailed,
- '.blarg.foo: Need more arguments',
+ 'blarg.foo: Need more arguments',
function.validate, snippet)
diff --git a/heat/tests/test_grouputils.py b/heat/tests/test_grouputils.py
index c1663eb3a..428abf93b 100644
--- a/heat/tests/test_grouputils.py
+++ b/heat/tests/test_grouputils.py
@@ -15,8 +15,9 @@ import mock
import six
from heat.common import grouputils
+from heat.common import identifier
from heat.common import template_format
-from heat.engine import rsrc_defn
+from heat.rpc import client as rpc_client
from heat.tests import common
from heat.tests import utils
@@ -34,7 +35,8 @@ class GroupUtilsTest(common.HeatTestCase):
def test_non_nested_resource(self):
group = mock.Mock()
- self.patchobject(group, 'nested', return_value=None)
+ group.nested_identifier.return_value = None
+ group.nested.return_value = None
self.assertEqual(0, grouputils.get_size(group))
self.assertEqual([], grouputils.get_members(group))
@@ -45,9 +47,7 @@ class GroupUtilsTest(common.HeatTestCase):
group = mock.Mock()
t = template_format.parse(nested_stack)
stack = utils.parse_stack(t)
- # group size
- self.patchobject(group, 'nested', return_value=stack)
- self.assertEqual(2, grouputils.get_size(group))
+ group.nested.return_value = stack
# member list (sorted)
members = [r for r in six.itervalues(stack)]
@@ -61,18 +61,6 @@ class GroupUtilsTest(common.HeatTestCase):
partial_ids = grouputils.get_member_refids(group, exclude=['ID-r1'])
self.assertEqual(['ID-r0'], partial_ids)
- # names
- names = grouputils.get_member_names(group)
- self.assertEqual(['r0', 'r1'], names)
-
- # defn snippets as list
- expected = rsrc_defn.ResourceDefinition(
- None,
- "OverwrittenFnGetRefIdType")
-
- member_defs = grouputils.get_member_definitions(group)
- self.assertEqual([(x, expected) for x in names], member_defs)
-
def test_group_with_failed_members(self):
group = mock.Mock()
t = template_format.parse(nested_stack)
@@ -84,7 +72,189 @@ class GroupUtilsTest(common.HeatTestCase):
rsrc_err.status = rsrc_err.FAILED
rsrc_ok = stack.resources['r1']
- self.assertEqual(1, grouputils.get_size(group))
self.assertEqual([rsrc_ok], grouputils.get_members(group))
self.assertEqual(['ID-r1'], grouputils.get_member_refids(group))
- self.assertEqual(['r1'], grouputils.get_member_names(group))
+
+
+class GroupInspectorTest(common.HeatTestCase):
+ resources = [
+ {
+ 'updated_time': '2018-01-01T12:00',
+ 'creation_time': '2018-01-01T02:00',
+ 'resource_name': 'A',
+ 'physical_resource_id': 'a',
+ 'resource_action': 'UPDATE',
+ 'resource_status': 'COMPLETE',
+ 'resource_status_reason': 'resource changed',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'aaaaaaaa',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ {
+ 'updated_time': '2018-01-01T10:00',
+ 'creation_time': '2018-01-01T03:00',
+ 'resource_name': 'E',
+ 'physical_resource_id': 'e',
+ 'resource_action': 'UPDATE',
+ 'resource_status': 'FAILED',
+ 'resource_status_reason': 'reasons',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'eeeeeeee',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ {
+ 'updated_time': '2018-01-01T11:00',
+ 'creation_time': '2018-01-01T03:00',
+ 'resource_name': 'B',
+ 'physical_resource_id': 'b',
+ 'resource_action': 'UPDATE',
+ 'resource_status': 'FAILED',
+ 'resource_status_reason': 'reasons',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'bbbbbbbb',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ {
+ 'updated_time': '2018-01-01T13:00',
+ 'creation_time': '2018-01-01T01:00',
+ 'resource_name': 'C',
+ 'physical_resource_id': 'c',
+ 'resource_action': 'UPDATE',
+ 'resource_status': 'COMPLETE',
+ 'resource_status_reason': 'resource changed',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'cccccccc',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ {
+ 'updated_time': '2018-01-01T04:00',
+ 'creation_time': '2018-01-01T04:00',
+ 'resource_name': 'F',
+ 'physical_resource_id': 'f',
+ 'resource_action': 'CREATE',
+ 'resource_status': 'COMPLETE',
+ 'resource_status_reason': 'resource changed',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'ffffffff',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ {
+ 'updated_time': '2018-01-01T04:00',
+ 'creation_time': '2018-01-01T04:00',
+ 'resource_name': 'D',
+ 'physical_resource_id': 'd',
+ 'resource_action': 'CREATE',
+ 'resource_status': 'COMPLETE',
+ 'resource_status_reason': 'resource changed',
+ 'resource_type': 'OS::Heat::Test',
+ 'resource_id': 'dddddddd',
+ 'stack_identity': 'bar',
+ 'stack_name': 'nested_test',
+ 'required_by': [],
+ 'parent_resource': 'stack_resource',
+ },
+ ]
+
+ template = {
+ 'heat_template_version': 'newton',
+ 'resources': {
+ 'A': {
+ 'type': 'OS::Heat::TestResource',
+ },
+ },
+ }
+
+ def setUp(self):
+ super(GroupInspectorTest, self).setUp()
+ self.ctx = mock.Mock()
+ self.rpc_client = mock.Mock(spec=rpc_client.EngineClient)
+ self.identity = identifier.HeatIdentifier('foo', 'nested_test', 'bar')
+
+ self.list_rsrcs = self.rpc_client.list_stack_resources
+ self.get_tmpl = self.rpc_client.get_template
+
+ self.insp = grouputils.GroupInspector(self.ctx, self.rpc_client,
+ self.identity)
+
+ def test_no_identity(self):
+ self.insp = grouputils.GroupInspector(self.ctx, self.rpc_client, None)
+
+ self.assertEqual(0, self.insp.size(include_failed=True))
+ self.assertEqual([], list(self.insp.member_names(include_failed=True)))
+ self.assertIsNone(self.insp.template())
+
+ self.list_rsrcs.assert_not_called()
+ self.get_tmpl.assert_not_called()
+
+ def test_size_include_failed(self):
+ self.list_rsrcs.return_value = self.resources
+
+ self.assertEqual(6, self.insp.size(include_failed=True))
+
+ self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
+
+ def test_size_exclude_failed(self):
+ self.list_rsrcs.return_value = self.resources
+
+ self.assertEqual(4, self.insp.size(include_failed=False))
+
+ self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
+
+ def test_member_names_include_failed(self):
+ self.list_rsrcs.return_value = self.resources
+
+ self.assertEqual(['B', 'E', 'C', 'A', 'D', 'F'],
+ list(self.insp.member_names(include_failed=True)))
+
+ self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
+
+ def test_member_names_exclude_failed(self):
+ self.list_rsrcs.return_value = self.resources
+
+ self.assertEqual(['C', 'A', 'D', 'F'],
+ list(self.insp.member_names(include_failed=False)))
+
+ self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
+
+ def test_list_rsrc_caching(self):
+ self.list_rsrcs.return_value = self.resources
+
+ self.insp.size(include_failed=False)
+ list(self.insp.member_names(include_failed=True))
+ self.insp.size(include_failed=True)
+ list(self.insp.member_names(include_failed=False))
+
+ self.list_rsrcs.assert_called_once_with(self.ctx, dict(self.identity))
+ self.get_tmpl.assert_not_called()
+
+ def test_get_template(self):
+ self.get_tmpl.return_value = self.template
+
+ tmpl = self.insp.template()
+ self.assertEqual(self.template, tmpl.t)
+
+ self.get_tmpl.assert_called_once_with(self.ctx, dict(self.identity))
+
+ def test_get_tmpl_caching(self):
+ self.get_tmpl.return_value = self.template
+
+ self.insp.template()
+ self.insp.template()
+
+ self.get_tmpl.assert_called_once_with(self.ctx, dict(self.identity))
+ self.list_rsrcs.assert_not_called()
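GroupInspector fetches group membership over RPC once and memoizes it, so any mix of size() and member_names() calls costs a single list_stack_resources round trip, and template() likewise caches its get_template result; the two *_caching tests pin that down. A condensed sketch of the memoization:

    class GroupInspectorSketch(object):
        """Sketch of the caching only, not the full grouputils class."""

        def __init__(self, context, rpc_client, identity):
            self._context = context
            self._rpc = rpc_client
            self._identity = identity
            self._rsrcs = None

        def _member_data(self):
            if self._identity is None:
                return []
            if self._rsrcs is None:  # one RPC call, then cached
                self._rsrcs = self._rpc.list_stack_resources(
                    self._context, dict(self._identity))
            return self._rsrcs

        def size(self, include_failed):
            return sum(1 for r in self._member_data()
                       if include_failed
                       or r['resource_status'] != 'FAILED')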
diff --git a/heat/tests/test_hot.py b/heat/tests/test_hot.py
index da5965fa4..ae01479e5 100644
--- a/heat/tests/test_hot.py
+++ b/heat/tests/test_hot.py
@@ -864,7 +864,7 @@ class HOTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
- self.assertIn('.str_replace: "str_replace" parameters must be a'
+ self.assertIn('str_replace: "str_replace" parameters must be a'
' mapping', six.text_type(ex))
def test_str_replace_invalid_param_type_init(self):
@@ -1027,13 +1027,13 @@ class HOTemplateTest(common.HeatTestCase):
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
- self.assertIn('.list_join: Incorrect arguments to "list_join"',
+ self.assertIn('list_join: Incorrect arguments to "list_join"',
six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
- self.assertIn('.list_join: Incorrect arguments to "list_join"',
+ self.assertIn('list_join: Incorrect arguments to "list_join"',
six.text_type(exc1))
def test_join_int_invalid(self):
@@ -1041,25 +1041,25 @@ class HOTemplateTest(common.HeatTestCase):
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
- self.assertIn('.list_join: Incorrect arguments', six.text_type(exc))
+ self.assertIn('list_join: Incorrect arguments', six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
- self.assertIn('.list_join: Incorrect arguments', six.text_type(exc1))
+ self.assertIn('list_join: Incorrect arguments', six.text_type(exc1))
def test_join_invalid_value(self):
snippet = {'list_join': [',']}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
- self.assertIn('.list_join: Incorrect arguments to "list_join"',
+ self.assertIn('list_join: Incorrect arguments to "list_join"',
six.text_type(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
- self.assertIn('.list_join: Incorrect arguments to "list_join"',
+ self.assertIn('list_join: Incorrect arguments to "list_join"',
six.text_type(exc1))
def test_join_invalid_multiple(self):
@@ -1105,6 +1105,26 @@ class HOTemplateTest(common.HeatTestCase):
self.assertEqual({}, resolved)
+ def test_merge_containing_repeat_multi_list_no_nested_loop_with_none(self):
+ snippet = {'map_merge': {'repeat': {
+ 'template': {'ROLE': 'ROLE', 'NAME': 'NAME'},
+ 'for_each': {'ROLE': None, 'NAME': ['n1', 'n2']},
+ 'permutations': False}}}
+ tmpl = template.Template(hot_mitaka_tpl_empty)
+ resolved = self.resolve(snippet, tmpl)
+
+ self.assertEqual({}, resolved)
+
+ def test_merge_containing_repeat_multi_list_no_nested_loop_all_none(self):
+ snippet = {'map_merge': {'repeat': {
+ 'template': {'ROLE': 'ROLE', 'NAME': 'NAME'},
+ 'for_each': {'ROLE': None, 'NAME': None},
+ 'permutations': False}}}
+ tmpl = template.Template(hot_mitaka_tpl_empty)
+ resolved = self.resolve(snippet, tmpl)
+
+ self.assertEqual({}, resolved)
+
def test_map_replace(self):
snippet = {'map_replace': [{'f1': 'b1', 'f2': 'b2'},
{'keys': {'f1': 'F1'},
@@ -1269,7 +1289,7 @@ class HOTemplateTest(common.HeatTestCase):
def test_yaql_non_map_args(self):
snippet = {'yaql': 'invalid'}
tmpl = template.Template(hot_newton_tpl_empty)
- msg = '.yaql: Arguments to "yaql" must be a map.'
+ msg = 'yaql: Arguments to "yaql" must be a map.'
self.assertRaisesRegex(exception.StackValidationFailed,
msg, self.resolve, snippet, tmpl)
@@ -1278,7 +1298,7 @@ class HOTemplateTest(common.HeatTestCase):
'data': {'var1': [1, 2, 3, 4]}}}
tmpl = template.Template(hot_newton_tpl_empty)
yaql = tmpl.parse(None, snippet)
- regxp = ('.yaql: Bad expression Parse error: unexpected end '
+ regxp = ('yaql: Bad expression Parse error: unexpected end '
'of statement.')
self.assertRaisesRegex(exception.StackValidationFailed, regxp,
function.validate, yaql)
@@ -1363,7 +1383,7 @@ class HOTemplateTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- error_msg = ('.equals: Arguments to "equals" must be '
+ error_msg = ('equals: Arguments to "equals" must be '
'of the form: [value_1, value_2]')
self.assertIn(error_msg, six.text_type(exc))
@@ -1715,7 +1735,7 @@ resources:
snippet = {'repeat': {'template': 'this is %var%',
'for_each': '%var%'}}
repeat = tmpl.parse(None, snippet)
- regxp = ('.repeat: The "for_each" argument to "repeat" '
+ regxp = ('repeat: The "for_each" argument to "repeat" '
'must contain a map')
self.assertRaisesRegex(exception.StackValidationFailed, regxp,
function.validate, repeat)
@@ -1968,7 +1988,7 @@ resources:
snippet = {'Fn::GetAZs': ''}
stack = parser.Stack(utils.dummy_context(), 'test_stack',
template.Template(hot_juno_tpl_empty))
- regxp = '.Fn::GetAZs: The template version is invalid'
+ regxp = 'Fn::GetAZs: The template version is invalid'
self.assertRaisesRegex(exception.StackValidationFailed,
regxp,
function.validate,
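The two map_merge/repeat tests added above cover 'permutations: false' when a for_each list is None: the lists are iterated in lockstep rather than as a cross product, and a None list contributes no iterations, so repeat yields nothing and map_merge resolves to {}. A hedged sketch of those semantics (real Heat also validates that the lockstep lists have equal lengths):

    def repeat_no_permutations(template, for_each):
        # Lockstep iteration (zip) instead of permutations; None
        # lists are treated as empty, producing no items at all.
        keys = sorted(for_each)
        lists = [for_each[k] or [] for k in keys]
        result = []
        for values in zip(*lists):
            subs = dict(zip(keys, values))
            result.append({k: subs.get(v, v)
                           for k, v in template.items()})
        return result

    assert repeat_no_permutations(
        {'ROLE': 'ROLE', 'NAME': 'NAME'},
        {'ROLE': None, 'NAME': ['n1', 'n2']}) == []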
diff --git a/heat/tests/test_metadata_refresh.py b/heat/tests/test_metadata_refresh.py
index 1741fe28a..b638f3d36 100644
--- a/heat/tests/test_metadata_refresh.py
+++ b/heat/tests/test_metadata_refresh.py
@@ -219,7 +219,6 @@ class WaitConditionMetadataUpdateTest(common.HeatTestCase):
def setUp(self):
super(WaitConditionMetadataUpdateTest, self).setUp()
self.man = service.EngineService('a-host', 'a-topic')
- self.man.create_periodic_tasks()
@mock.patch.object(nova.NovaClientPlugin, 'find_flavor_by_name_or_id')
@mock.patch.object(glance.GlanceClientPlugin, 'find_image_by_name_or_id')
diff --git a/heat/tests/test_properties.py b/heat/tests/test_properties.py
index 5acbc146f..f0b6d89dd 100644
--- a/heat/tests/test_properties.py
+++ b/heat/tests/test_properties.py
@@ -784,8 +784,9 @@ class PropertyTest(common.HeatTestCase):
# python 3.4.3 returns another error message
# try to handle this by regexp
self.assertRaisesRegex(
- TypeError, "int\(\) argument must be a string(, a bytes-like "
- "object)? or a number, not 'list'", p.get_value, [1])
+ TypeError, r"int\(\) argument must be a string"
+ "(, a bytes-like object)?"
+ " or a number, not 'list'", p.get_value, [1])
def test_str_from_int(self):
schema = {'Type': 'String'}
@@ -1686,15 +1687,15 @@ class PropertiesValidationTest(common.HeatTestCase):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': ['foo', 'bar']})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
- self.assertEqual('Property error: foo: Value must be a string',
- six.text_type(ex))
+ self.assertIn('Property error: foo: Value must be a string',
+ six.text_type(ex))
def test_dict_instead_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': {'foo': 'bar'}})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
- self.assertEqual('Property error: foo: Value must be a string',
- six.text_type(ex))
+ self.assertIn('Property error: foo: Value must be a string',
+ six.text_type(ex))
def test_none_string(self):
schema = {'foo': {'Type': 'String'}}
diff --git a/heat/tests/test_provider_template.py b/heat/tests/test_provider_template.py
index 8de20c782..1f1f6cc5e 100644
--- a/heat/tests/test_provider_template.py
+++ b/heat/tests/test_provider_template.py
@@ -311,7 +311,6 @@ class ProviderTemplateTest(common.HeatTestCase):
temp_res = template_resource.TemplateResource('test_t_res',
definition, stack)
temp_res.resource_id = 'dummy_id'
- self.assertIsNone(temp_res.validate())
temp_res.nested_identifier = mock.Mock()
temp_res.nested_identifier.return_value = {'foo': 'bar'}
@@ -319,6 +318,8 @@ class ProviderTemplateTest(common.HeatTestCase):
output = {'outputs': [{'output_key': 'Foo', 'output_value': None,
'output_error': 'it is all bad'}]}
temp_res._rpc_client.show_stack.return_value = [output]
+ temp_res._rpc_client.list_stack_resources.return_value = []
+ self.assertIsNone(temp_res.validate())
self.assertRaises(exception.TemplateOutputError,
temp_res.FnGetAtt, 'Foo')
diff --git a/heat/tests/test_resource.py b/heat/tests/test_resource.py
index ba8f44b8e..82e20e954 100644
--- a/heat/tests/test_resource.py
+++ b/heat/tests/test_resource.py
@@ -2409,7 +2409,7 @@ class ResourceTest(common.HeatTestCase):
self.assertEqual(stack.t.id, res.current_template_id)
# ensure that requires was not updated
self.assertItemsEqual([2], res.requires)
- self._assert_resource_lock(res.id, None, None)
+ self._assert_resource_lock(res.id, None, 2)
def test_convergence_update_replace_rollback(self):
rsrc_def = rsrc_defn.ResourceDefinition('test_res',
diff --git a/heat/tests/test_rpc_client.py b/heat/tests/test_rpc_client.py
index bb077e908..7aa7fc351 100644
--- a/heat/tests/test_rpc_client.py
+++ b/heat/tests/test_rpc_client.py
@@ -301,23 +301,6 @@ class EngineRpcAPITestCase(common.HeatTestCase):
details={u'wordpress': []},
sync_call=True)
- def test_create_watch_data(self):
- self._test_engine_api('create_watch_data', 'call',
- watch_name='watch1',
- stats_data={})
-
- def test_show_watch(self):
- self._test_engine_api('show_watch', 'call',
- watch_name='watch1')
-
- def test_show_watch_metric(self):
- self._test_engine_api('show_watch_metric', 'call',
- metric_namespace=None, metric_name=None)
-
- def test_set_watch_state(self):
- self._test_engine_api('set_watch_state', 'call',
- watch_name='watch1', state="xyz")
-
def test_list_software_configs(self):
self._test_engine_api('list_software_configs', 'call',
limit=mock.ANY, marker=mock.ANY)
diff --git a/heat/tests/test_signal.py b/heat/tests/test_signal.py
index ec4f17e10..87e54c9b7 100644
--- a/heat/tests/test_signal.py
+++ b/heat/tests/test_signal.py
@@ -537,9 +537,6 @@ class SignalTest(common.HeatTestCase):
'previous': 'SUCCESS'}
ceilo_expected = 'alarm state changed from SUCCESS to foo (apples)'
- watch_details = {'state': 'go_for_it'}
- watch_expected = 'alarm state changed to go_for_it'
-
str_details = 'a string details'
str_expected = str_details
@@ -547,13 +544,11 @@ class SignalTest(common.HeatTestCase):
none_expected = 'No signal details provided'
# Test
- for test_d in (ceilo_details, watch_details, str_details,
- none_details):
+ for test_d in (ceilo_details, str_details, none_details):
rsrc.signal(details=test_d)
# Verify
mock_add.assert_any_call('SIGNAL', 'COMPLETE', ceilo_expected)
- mock_add.assert_any_call('SIGNAL', 'COMPLETE', watch_expected)
mock_add.assert_any_call('SIGNAL', 'COMPLETE', str_expected)
mock_add.assert_any_call('SIGNAL', 'COMPLETE', none_expected)
diff --git a/heat/tests/test_stack.py b/heat/tests/test_stack.py
index 1b59fc422..92f27be68 100644
--- a/heat/tests/test_stack.py
+++ b/heat/tests/test_stack.py
@@ -382,6 +382,31 @@ class StackTest(common.HeatTestCase):
self.assertEqual('A', all_resources[0].name)
@mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
+ def test_iter_resources_with_nonexistent_template(self, mock_db_call):
+ tpl = {'HeatTemplateFormatVersion': '2012-12-12',
+ 'Resources':
+ {'A': {'Type': 'GenericResourceType'},
+ 'B': {'Type': 'GenericResourceType'}}}
+ self.stack = stack.Stack(self.ctx, 'test_stack',
+ template.Template(tpl),
+ status_reason='blarg')
+
+ self.stack.store()
+
+ mock_rsc_a = mock.MagicMock(current_template_id=self.stack.t.id)
+ mock_rsc_a.name = 'A'
+ mock_rsc_b = mock.MagicMock(current_template_id=self.stack.t.id + 1)
+ mock_rsc_b.name = 'B'
+ mock_db_call.return_value = {
+ 'A': mock_rsc_a,
+ 'B': mock_rsc_b
+ }
+
+ all_resources = list(self.stack.iter_resources())
+
+ self.assertEqual(1, len(all_resources))
+
+ @mock.patch.object(resource_objects.Resource, 'get_all_by_stack')
def test_iter_resources_nested_with_filters(self, mock_db_call):
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources':
@@ -1280,8 +1305,8 @@ class StackTest(common.HeatTestCase):
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuv')
stack_names = ['_foo', '1bad', '.kcats', 'test stack', ' teststack',
'^-^', '"stack"', '1234', 'cat|dog', '$(foo)',
- 'test/stack', 'test\stack', 'test::stack', 'test;stack',
- 'test~stack', '#test', gt_255_chars]
+ 'test/stack', 'test\\stack', 'test::stack',
+ 'test;stack', 'test~stack', '#test', gt_255_chars]
for stack_name in stack_names:
ex = self.assertRaises(
exception.StackValidationFailed, stack.Stack,
@@ -1879,7 +1904,7 @@ class StackTest(common.HeatTestCase):
self.assertRaisesRegex(
exception.StackValidationFailed,
('Outputs.Resource_attr.Value.Fn::GetAtt: The Referenced '
- 'Attribute \(AResource Bar\) is incorrect.'),
+ r'Attribute \(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_incorrect_outputs_cfn_incorrect_reference(self):
@@ -2242,7 +2267,7 @@ class StackTest(common.HeatTestCase):
self.assertRaisesRegex(
exception.StackValidationFailed,
('outputs.resource_attr.value.get_attr: The Referenced Attribute '
- '\(AResource Bar\) is incorrect.'),
+ r'\(AResource Bar\) is incorrect.'),
self.stack.validate)
def test_snapshot_save_called_first(self):
diff --git a/heat/tests/test_stack_resource.py b/heat/tests/test_stack_resource.py
index 64ece0c0d..cbb6a9538 100644
--- a/heat/tests/test_stack_resource.py
+++ b/heat/tests/test_stack_resource.py
@@ -689,7 +689,7 @@ class StackResourceAttrTest(StackResourceBaseTest):
output = {'outputs': []}
self.parent_resource._rpc_client.show_stack.return_value = [output]
- self.assertRaises(exception.InvalidTemplateAttribute,
+ self.assertRaises(exception.NotFound,
self.parent_resource.get_output,
"key")
@@ -701,7 +701,7 @@ class StackResourceAttrTest(StackResourceBaseTest):
output = {}
self.parent_resource._rpc_client.show_stack.return_value = [output]
- self.assertRaises(exception.InvalidTemplateAttribute,
+ self.assertRaises(exception.NotFound,
self.parent_resource.get_output,
"key")
diff --git a/heat/tests/test_stack_update.py b/heat/tests/test_stack_update.py
index 0168e85df..ce5e53bb4 100644
--- a/heat/tests/test_stack_update.py
+++ b/heat/tests/test_stack_update.py
@@ -599,8 +599,6 @@ class StackUpdateTest(common.HeatTestCase):
self.assertEqual((stack.Stack.UPDATE, stack.Stack.FAILED),
self.stack.state)
mock_del.assert_called_once_with()
- # Unset here so destroy() is not stubbed for stack.delete cleanup
- self.m.UnsetStubs()
def test_update_modify_replace_failed_create(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
@@ -1203,9 +1201,6 @@ class StackUpdateTest(common.HeatTestCase):
mock_create.assert_called_once_with()
self.assertEqual(2, mock_delete.call_count)
- # Unset here so delete() is not stubbed for stack.delete cleanup
- self.m.UnsetStubs()
-
def test_update_rollback_replace(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
@@ -1238,9 +1233,6 @@ class StackUpdateTest(common.HeatTestCase):
self.stack.state)
self.assertEqual(3, mock_delete.call_count)
- # Unset here so delete() is not stubbed for stack.delete cleanup
- self.m.UnsetStubs()
-
def test_update_replace_by_reference(self):
"""Test case for changes in dynamic attributes.
diff --git a/heat/tests/test_template.py b/heat/tests/test_template.py
index 0c6e137a1..a43466850 100644
--- a/heat/tests/test_template.py
+++ b/heat/tests/test_template.py
@@ -1225,10 +1225,8 @@ class TemplateTest(common.HeatTestCase):
snippet = {"Fn::GetAZs": ""}
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template))
- self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
fc = fakes_nova.FakeClient()
- nova.NovaClientPlugin._create().AndReturn(fc)
- self.m.ReplayAll()
+ self.patchobject(nova.NovaClientPlugin, '_create', return_value=fc)
self.assertEqual(["nova1"], self.resolve(snippet, tmpl, stk))
def test_replace_string_values(self):
diff --git a/heat/tests/test_template_format.py b/heat/tests/test_template_format.py
index eb8daa596..beffdcaff 100644
--- a/heat/tests/test_template_format.py
+++ b/heat/tests/test_template_format.py
@@ -82,7 +82,7 @@ class JsonToYamlTest(common.HeatTestCase):
with open(path, 'r') as f:
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
- match = re.search('[\s,{]\d+\s*:', yml_str)
+ match = re.search(r'[\s,{]\d+\s*:', yml_str)
# Check that there are no matches of integer-only keys
# lacking explicit quotes
self.assertIsNone(match)
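Several hunks in this change (test_environment, test_fault_middleware, test_properties, test_stack, and this one) turn regex patterns into raw strings or double the backslashes: on Python 3.6+ an unrecognized escape such as \d or \+ in a plain string literal emits a DeprecationWarning and is slated to become a SyntaxError. A quick illustration:

    import re

    # The raw-string spelling keeps the backslashes intact without
    # relying on Python passing invalid escapes through unchanged.
    assert re.search(r'[\s,{]\d+\s*:', ' 42 :') is not None
    assert '\\+' == r'\+'  # doubled backslash == raw-string escape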
diff --git a/heat/tests/test_translation_rule.py b/heat/tests/test_translation_rule.py
index be336e218..bddc24791 100644
--- a/heat/tests/test_translation_rule.py
+++ b/heat/tests/test_translation_rule.py
@@ -597,6 +597,63 @@ class TestTranslationRule(common.HeatTestCase):
self.assertEqual('yellow', result)
self.assertEqual('yellow', tran.resolved_translations['far.0.red'])
+ def test_resolve_rule_nested_list_populated(self):
+ client_plugin, schema = self._test_resolve_rule_nested_list()
+ data = {
+ 'instances': [{'networks': [{'port': 'port1', 'net': 'net1'}]}]
+ }
+ props = properties.Properties(schema, data)
+ rule = translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ ['instances', 'networks', 'port'],
+ client_plugin=client_plugin,
+ finder='find_name_id',
+ entity='port'
+ )
+ tran = translation.Translation(props)
+ tran.set_rules([rule])
+ self.assertTrue(tran.has_translation('instances.networks.port'))
+ result = tran.translate('instances.0.networks.0.port',
+ data['instances'][0]['networks'][0]['port'])
+ self.assertEqual('port1_id', result)
+ self.assertEqual('port1_id', tran.resolved_translations[
+ 'instances.0.networks.0.port'])
+
+ def _test_resolve_rule_nested_list(self):
+ class FakeClientPlugin(object):
+ def find_name_id(self, entity=None, value=None):
+ if entity == 'net':
+ return 'net1_id'
+ if entity == 'port':
+ return 'port1_id'
+
+ schema = {
+ 'instances': properties.Schema(
+ properties.Schema.LIST,
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ 'networks': properties.Schema(
+ properties.Schema.LIST,
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ 'port': properties.Schema(
+ properties.Schema.STRING,
+ ),
+ 'net': properties.Schema(
+ properties.Schema.STRING,
+ ),
+ }
+ )
+ )
+ }
+ )
+ )}
+
+ return FakeClientPlugin(), schema
+
def test_resolve_rule_list_with_function(self):
client_plugin, schema = self._test_resolve_rule(is_list=True)
join_func = cfn_funcs.Join(None,
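The new tests exercise a RESOLVE translation rule whose property path crosses two nested lists, instances and networks. Stripped of Heat's Translation machinery, the behaviour under test is roughly the following (a sketch with illustrative names, not Heat's real code):

# Heat-independent sketch of the RESOLVE rule exercised above: walk both
# nested lists and swap the named entity for the ID the finder returns.
def resolve_nested(data, finder, entity):
    for instance in data.get('instances', []):
        for network in instance.get('networks', []):
            if network.get(entity) is not None:
                network[entity] = finder(entity=entity,
                                         value=network[entity])
    return data

class FakeClientPlugin(object):
    def find_name_id(self, entity=None, value=None):
        return '%s_id' % value  # pretend name-to-ID lookup

data = {'instances': [{'networks': [{'port': 'port1', 'net': 'net1'}]}]}
resolve_nested(data, FakeClientPlugin().find_name_id, 'port')
assert data['instances'][0]['networks'][0]['port'] == 'port1_id'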
diff --git a/heat/tests/test_validate.py b/heat/tests/test_validate.py
index 503ca3d64..d2f885abf 100644
--- a/heat/tests/test_validate.py
+++ b/heat/tests/test_validate.py
@@ -207,6 +207,19 @@ test_template_findinmap_invalid = '''
}
'''
+test_template_bad_yaql_metadata = '''
+heat_template_version: 2016-10-14
+parameters:
+resources:
+ my_instance:
+ type: OS::Heat::TestResource
+ metadata:
+ test:
+ yaql:
+ expression: {'foo': 'bar'}
+ data: "$.data"
+'''
+
test_template_invalid_resources = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
@@ -1054,6 +1067,12 @@ class ValidateTest(common.HeatTestCase):
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertNotEqual(res['Description'], 'Successfully validated')
+ def test_validate_bad_yaql_metadata(self):
+ t = template_format.parse(test_template_bad_yaql_metadata)
+ res = dict(self.engine.validate_template(self.ctx, t, {}))
+ self.assertIn('Error', res)
+ self.assertIn('yaql', res['Error'])
+
def test_validate_parameters(self):
t = template_format.parse(test_template_ref % 'WikiDatabase')
res = dict(self.engine.validate_template(self.ctx, t, {}))
@@ -1344,8 +1363,10 @@ class ValidateTest(common.HeatTestCase):
t = template_format.parse(test_template_snapshot_deletion_policy)
res = dict(self.engine.validate_template(self.ctx, t, {}))
- self.assertEqual(
- {'Error': '"Snapshot" deletion policy not supported'}, res)
+ self.assertEqual({'Error': 'Resources.WikiDatabase.DeletionPolicy: '
+ '"Snapshot" deletion policy '
+ 'not supported'},
+ res)
def test_volume_snapshot_deletion_policy(self):
t = template_format.parse(test_template_volume_snapshot)
@@ -2018,3 +2039,69 @@ parameter_groups:
self.ctx, t, {})
self.assertEqual(exception.InvalidSchemaError,
exc.exc_info[0])
+
+ def test_validate_empty_resource_group(self):
+ engine = service.EngineService('a', 't')
+ params = {
+ "resource_registry": {
+ "OS::Test::TestResource": "https://server.test/nested.template"
+ }
+ }
+ root_template_str = '''
+heat_template_version: 2015-10-15
+parameters:
+ test_root_param:
+ type: string
+resources:
+ Group:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 0
+ resource_def:
+ type: OS::Test::TestResource
+'''
+ nested_template_str = '''
+heat_template_version: 2015-10-15
+parameters:
+ test_param:
+ type: string
+'''
+ root_template = template_format.parse(root_template_str)
+
+ self.patchobject(urlfetch, 'get')
+ urlfetch.get.return_value = nested_template_str
+
+ res = dict(engine.validate_template(self.ctx, root_template,
+ params, show_nested=True))
+ expected = {
+ 'Description': 'No description',
+ 'Environment': {
+ 'event_sinks': [],
+ 'parameter_defaults': {},
+ 'parameters': {},
+ 'resource_registry': {
+ 'OS::Test::TestResource':
+ 'https://server.test/nested.template',
+ 'resources': {}}},
+ 'NestedParameters': {
+ 'Group': {
+ 'Description': 'No description',
+ 'Parameters': {},
+ 'Type': 'OS::Heat::ResourceGroup',
+ 'NestedParameters': {
+ '0': {
+ 'Description': 'No description',
+ 'Parameters': {
+ 'test_param': {
+ 'Description': '',
+ 'Label': 'test_param',
+ 'NoEcho': 'false',
+ 'Type': 'String'}},
+ 'Type': 'OS::Test::TestResource'}}}},
+ 'Parameters': {
+ 'test_root_param': {
+ 'Description': '',
+ 'Label': 'test_root_param',
+ 'NoEcho': 'false',
+ 'Type': 'String'}}}
+ self.assertEqual(expected, res)
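test_validate_bad_yaql_metadata hands the yaql function a map where a string expression is required, and expects validation to report the error rather than crash. A hypothetical sketch of that argument check, assuming only the argument types are being policed (names are not Heat's real code):

# Hypothetical sketch: yaql takes an 'expression' (string) and 'data'
# argument, so a map such as {'foo': 'bar'} in the expression slot must
# be rejected at validation time.
def validate_yaql_args(args):
    if not isinstance(args, dict) or set(args) != {'expression', 'data'}:
        raise ValueError('yaql takes exactly "expression" and "data"')
    if not isinstance(args['expression'], str):
        raise ValueError('yaql "expression" must be a string')

validate_yaql_args({'expression': '$.data', 'data': {'foo': 'bar'}})  # ok

try:
    validate_yaql_args({'expression': {'foo': 'bar'}, 'data': '$.data'})
except ValueError as exc:
    assert 'yaql' in str(exc)  # mirrors assertIn('yaql', res['Error'])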
diff --git a/heat/tests/test_watch.py b/heat/tests/test_watch.py
deleted file mode 100644
index f22ca4639..000000000
--- a/heat/tests/test_watch.py
+++ /dev/null
@@ -1,978 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import datetime
-
-import mock
-from oslo_utils import timeutils
-
-from heat.common import exception
-from heat.engine import stack
-from heat.engine import template
-from heat.engine import watchrule
-from heat.objects import watch_rule
-from heat.tests import common
-from heat.tests import utils
-
-
-class WatchData(object):
- def __init__(self, data, created_at):
- self.created_at = created_at
- self.data = {'test_metric': {'Value': data,
- 'Unit': 'Count'}}
-
-
-class DummyAction(object):
- signal = "DummyAction"
-
-
-class WatchRuleTest(common.HeatTestCase):
- stack_id = None
-
- def setUp(self):
- super(WatchRuleTest, self).setUp()
-
- self.username = 'watchrule_test_user'
- self.ctx = utils.dummy_context()
- self.ctx.auth_token = 'abcd1234'
-
- self._setup_database()
-
- def _setup_database(self):
- if self.stack_id is not None:
- return
- # Create a dummy stack in the DB as WatchRule instances
- # must be associated with a stack
- empty_tmpl = {'HeatTemplateFormatVersion': '2012-12-12'}
- tmpl = template.Template(empty_tmpl)
- stack_name = 'dummystack'
- dummy_stack = stack.Stack(self.ctx, stack_name, tmpl)
- dummy_stack.state_set(dummy_stack.CREATE, dummy_stack.COMPLETE,
- 'Testing')
- dummy_stack.store()
-
- self.stack_id = dummy_stack.id
-
- def _setup_action_mocks(self, mock_get_resource, now,
- action_expected=True):
- """Setup stubs for the action tests."""
- timeutils.set_time_override(now)
- self.addCleanup(timeutils.clear_time_override)
-
- if action_expected:
- dummy_action = DummyAction()
- mock_get_resource.return_value = dummy_action
-
- def test_minimum(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Minimum',
- 'ComparisonOperator': 'LessThanOrEqualToThreshold',
- 'Threshold': '50'}
-
- now = timeutils.utcnow()
- last = now - datetime.timedelta(seconds=320)
- data = [WatchData(77, now - datetime.timedelta(seconds=100))]
-
- # Test 1 - Values greater than 0 are normal
- data.append(WatchData(53, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(self.ctx,
- 'testwatch',
- rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- # Test 2
- data.append(WatchData(25, now - datetime.timedelta(seconds=250)))
- wr = watchrule.WatchRule(self.ctx,
- 'testwatch',
- rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- new_state = wr.get_alarm_state()
- self.assertEqual('ALARM', new_state)
-
- def test_maximum(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- last = now - datetime.timedelta(seconds=320)
- data = [WatchData(7, now - datetime.timedelta(seconds=100))]
-
- # Test 1 - values less than 30 are normal
- data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- # Test 2
- data.append(WatchData(35, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('ALARM', new_state)
-
- def test_samplecount(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'SampleCount',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '3'}
-
- now = timeutils.utcnow()
- last = now - datetime.timedelta(seconds=320)
- data = [WatchData(1, now - datetime.timedelta(seconds=100))]
-
- # Test 1 - 2 samples is normal
- data.append(WatchData(1, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- # Test 2 - 3 samples is an alarm
- data.append(WatchData(1, now - datetime.timedelta(seconds=200)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('ALARM', new_state)
-
- # Test 3 - 3 samples (one old) is normal
- data.pop(0)
- data.append(WatchData(1, now - datetime.timedelta(seconds=400)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- def test_sum(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Sum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '100'}
-
- now = timeutils.utcnow()
- last = now - datetime.timedelta(seconds=320)
- data = [WatchData(17, now - datetime.timedelta(seconds=100))]
-
- # Test 1 - values less than 40 are normal
- data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- # Test 2 - sum greater than 100 is an alarm
- data.append(WatchData(85, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('ALARM', new_state)
-
- def test_average(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Average',
- 'ComparisonOperator': 'GreaterThanThreshold',
- 'Threshold': '100'}
-
- now = timeutils.utcnow()
- last = now - datetime.timedelta(seconds=320)
- data = [WatchData(117, now - datetime.timedelta(seconds=100))]
-
- # Test 1
- data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('NORMAL', new_state)
-
- # Test 2
- data.append(WatchData(195, now - datetime.timedelta(seconds=250)))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=data,
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.now = now
- new_state = wr.get_alarm_state()
- self.assertEqual('ALARM', new_state)
-
- def test_load(self):
- # Setup
- # Insert two dummy watch rules into the DB
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
- rules = []
- rules.append(watchrule.WatchRule(context=self.ctx,
- watch_name='HttpFailureAlarm',
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- state='NORMAL'))
- rules[0].store()
-
- rules.append(watchrule.WatchRule(context=self.ctx,
- watch_name='AnotherWatch',
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- state='NORMAL'))
- rules[1].store()
-
- # Test
- for wn in ('HttpFailureAlarm', 'AnotherWatch'):
- wr = watchrule.WatchRule.load(self.ctx, wn)
- self.assertIsInstance(wr, watchrule.WatchRule)
- self.assertEqual(wn, wr.name)
- self.assertEqual('NORMAL', wr.state)
- self.assertEqual(rule, wr.rule)
- self.assertEqual(datetime.timedelta(seconds=int(rule['Period'])),
- wr.timeperiod)
-
- def test_store(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmActions': [u'WebServerRestartPolicy'],
- u'AlarmDescription': u'Restart the WikiDatabase',
- u'Namespace': u'system/linux',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'ServiceFailure'}
-
- # Test
- wr = watchrule.WatchRule(context=self.ctx, watch_name='storetest',
- stack_id=self.stack_id, rule=rule)
- wr.store()
-
- # Verify
- dbwr = watch_rule.WatchRule.get_by_name(self.ctx, 'storetest')
- self.assertIsNotNone(dbwr)
- self.assertEqual('storetest', dbwr.name)
- self.assertEqual(watchrule.WatchRule.NODATA, dbwr.state)
- self.assertEqual(rule, dbwr.rule)
-
- def test_evaluate(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- timeutils.set_time_override(now)
- self.addCleanup(timeutils.clear_time_override)
-
- # Test 1 - It's not time to evaluate, so should stay NODATA
- last = now - datetime.timedelta(seconds=299)
- data = WatchData(25, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- actions = wr.evaluate()
- self.assertEqual('NODATA', wr.state)
- self.assertEqual([], actions)
-
- # Test 2 - now - last == Period, so should set NORMAL
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(25, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- actions = wr.evaluate()
- self.assertEqual('NORMAL', wr.state)
- self.assertEqual(now, wr.last_evaluated)
- self.assertEqual([], actions)
-
- # Test 3 - Now data breaches Threshold, so should set ALARM
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- actions = wr.evaluate()
- self.assertEqual('ALARM', wr.state)
- self.assertEqual(now, wr.last_evaluated)
- self.assertEqual([], actions)
-
- def test_evaluate_suspend(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- timeutils.set_time_override(now)
- self.addCleanup(timeutils.clear_time_override)
-
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.state_set(wr.SUSPENDED)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual(wr.SUSPENDED, wr.state)
- self.assertEqual([], actions)
-
- def test_evaluate_ceilometer_controlled(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- timeutils.set_time_override(now)
- self.addCleanup(timeutils.clear_time_override)
-
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
- wr.state_set(wr.CEILOMETER_CONTROLLED)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual(wr.CEILOMETER_CONTROLLED, wr.state)
- self.assertEqual([], actions)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_rule_actions_alarm_normal(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now,
- action_expected=False)
-
- # Set data so rule evaluates to NORMAL state
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(25, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual('NORMAL', wr.state)
- self.assertEqual([], actions)
- self.assertEqual(0, mock_get_resource.call_count)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_rule_actions_alarm_alarm(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now)
-
- # Set data so rule evaluates to ALARM state
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual('ALARM', wr.state)
- self.assertEqual(['DummyAction'], actions)
-
- # re-set last_evaluated so the rule will be evaluated again.
- last = now - datetime.timedelta(seconds=300)
- wr.last_evaluated = last
- actions = wr.evaluate()
- self.assertEqual('ALARM', wr.state)
- self.assertEqual(['DummyAction'], actions)
- self.assertGreater(mock_get_resource.call_count, 0)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_rule_actions_alarm_two_actions(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction', 'AnotherDummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now)
-
- # Set data so rule evaluates to ALARM state
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual('ALARM', wr.state)
- self.assertEqual(['DummyAction', 'DummyAction'], actions)
- self.assertGreater(mock_get_resource.call_count, 0)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_rule_actions_ok_alarm(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'OKActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now, action_expected=False)
-
- # On creation the rule evaluates to NODATA state
- last = now - datetime.timedelta(seconds=300)
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual('NODATA', wr.state)
- self.assertEqual([], actions)
-
- # Move time forward and add data below threshold so we transition from
- # ALARM -> NORMAL, so evaluate() should output a 'DummyAction'
- now = now + datetime.timedelta(seconds=300)
- self._setup_action_mocks(mock_get_resource, now)
-
- data = WatchData(25, now - datetime.timedelta(seconds=150))
- wr.watch_data = [data]
-
- actions = wr.evaluate()
- self.assertEqual('NORMAL', wr.state)
- self.assertEqual(['DummyAction'], actions)
- self.assertGreater(mock_get_resource.call_count, 0)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_rule_actions_nodata(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'InsufficientDataActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now, action_expected=False)
-
- # Set data so rule evaluates to ALARM state
- last = now - datetime.timedelta(seconds=300)
- data = WatchData(35, now - datetime.timedelta(seconds=150))
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[data],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.evaluate()
- self.assertEqual('ALARM', wr.state)
- self.assertEqual([], actions)
-
- # Move time forward and don't add data so we transition from
- # ALARM -> NODATA, so evaluate() should output a 'DummyAction'
- now = now + datetime.timedelta(seconds=300)
- self._setup_action_mocks(mock_get_resource, now)
-
- actions = wr.evaluate()
- self.assertEqual('NODATA', wr.state)
- self.assertEqual(['DummyAction'], actions)
- self.assertGreater(mock_get_resource.call_count, 0)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_to_ceilometer(self, mock_get_resource):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'CreateDataMetric'}
- testdata = {u'CreateDataMetric': {"Unit": "Counter", "Value": "1"}}
-
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- mock_ceilometer_client = mock.MagicMock()
-
- self.ctx._clients = mock.MagicMock()
- self.ctx._clients.client.return_value = mock_ceilometer_client
-
- # Test
- wr._to_ceilometer(testdata)
-
- # Verify
- self.assertEqual(1, mock_ceilometer_client.samples.create.call_count)
- create_kw_args = mock_ceilometer_client.samples.create.call_args[1]
- expected = {
- 'counter_type': 'gauge',
- 'counter_name': 'CreateDataMetric',
- 'counter_volume': '1',
- 'counter_unit': 'Counter',
- 'resource_metadata': {},
- 'resource_id': None,
- }
- self.assertEqual(expected, create_kw_args)
-
- def test_create_watch_data(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
-
- wr.store()
-
- # Test
- data = {u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": []}}
- wr.create_watch_data(data)
-
- # Verify
- obj_wr = watch_rule.WatchRule.get_by_name(self.ctx, 'create_data_test')
- obj_wds = [wd for wd in obj_wr.watch_data]
- self.assertEqual(data, obj_wds[0].data)
-
- # Note, would be good to write another datapoint and check it
- # but sqlite seems to not interpret the backreference correctly
- # so dbwr.watch_data is always a list containing only the latest
- # datapoint. In non-test use on mysql this is not the case, we
- # correctly get a list of all datapoints where watch_rule_id ==
- # watch_rule.id, so leave it as a single-datapoint test for now.
-
- def test_create_watch_data_suspended(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule,
- state=watchrule.WatchRule.SUSPENDED)
-
- wr.store()
-
- # Test
- data = {u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": []}}
- wr.create_watch_data(data)
-
- # Verify
- obj_wr = watch_rule.WatchRule.get_by_name(self.ctx, 'create_data_test')
- obj_wds = [wd for wd in obj_wr.watch_data]
- self.assertEqual([], obj_wds)
-
- def test_create_watch_data_match(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'Dimensions': [{u'Name': 'AutoScalingGroupName',
- u'Value': 'group_x'}],
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- # Test
- data = {u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [{u'AutoScalingGroupName':
- u'group_x'}]}}
- self.assertTrue(watchrule.rule_can_use_sample(wr, data))
-
- def test_create_watch_data_match_2(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'Dimensions': [{u'Name': 'AutoScalingGroupName',
- u'Value': 'group_x'}],
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- # Test
- data = {u'not_interesting': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'AutoScalingGroupName':
- u'group_x'}]},
- u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'AutoScalingGroupName':
- u'group_x'}]}}
- self.assertTrue(watchrule.rule_can_use_sample(wr, data))
-
- def test_create_watch_data_match_3(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'Dimensions': [{u'Name': 'AutoScalingGroupName',
- u'Value': 'group_x'}],
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- # Test
- data = {u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'AutoScalingGroupName':
- u'group_x'}]}}
- self.assertTrue(watchrule.rule_can_use_sample(wr, data))
-
- def test_create_watch_data_not_match_metric(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'Dimensions': [{u'Name': 'AutoScalingGroupName',
- u'Value': 'group_x'}],
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- # Test
- data = {u'not_this': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'AutoScalingGroupName':
- u'group_x'}]},
- u'nor_this': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'AutoScalingGroupName':
- u'group_x'}]}}
- self.assertFalse(watchrule.rule_can_use_sample(wr, data))
-
- def test_create_watch_data_not_match_dimensions(self):
- # Setup
- rule = {u'EvaluationPeriods': u'1',
- u'AlarmDescription': u'test alarm',
- u'Period': u'300',
- u'ComparisonOperator': u'GreaterThanThreshold',
- u'Statistic': u'SampleCount',
- u'Threshold': u'2',
- u'Dimensions': [{u'Name': 'AutoScalingGroupName',
- u'Value': 'group_x'}],
- u'MetricName': u'CreateDataMetric'}
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='create_data_test',
- stack_id=self.stack_id,
- rule=rule)
- wr.store()
-
- # Test
- data = {u'CreateDataMetric': {"Unit": "Counter",
- "Value": "1",
- "Dimensions": [
- {u'wrong_key':
- u'group_x'}]}}
- self.assertFalse(watchrule.rule_can_use_sample(wr, data))
-
- def test_destroy(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- last = timeutils.utcnow()
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name='testwatch_destroy',
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- wr.store()
-
- # Sanity Check
- check = watchrule.WatchRule.load(context=self.ctx,
- watch_name='testwatch_destroy')
- self.assertIsInstance(check, watchrule.WatchRule)
-
- # Test
- wr.destroy()
- ex = self.assertRaises(exception.EntityNotFound,
- watchrule.WatchRule.load,
- context=self.ctx,
- watch_name='testwatch_destroy')
- self.assertEqual('Watch Rule', ex.kwargs.get('entity'))
-
- def test_state_set(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- last = timeutils.utcnow()
- watcher = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch_set_state",
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- watcher.state_set(watcher.SUSPENDED)
-
- # Verify
- self.assertEqual(watcher.SUSPENDED, watcher.state)
- check = watchrule.WatchRule.load(context=self.ctx,
- watch_name='testwatch_set_state')
- self.assertEqual(watchrule.WatchRule.SUSPENDED, check.state)
-
- @mock.patch('heat.engine.stack.Stack.resource_by_refid')
- def test_set_watch_state(self, mock_get_resource):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
- self._setup_action_mocks(mock_get_resource, now)
-
- # Set data so rule evaluates to ALARM state
- last = now - datetime.timedelta(seconds=200)
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- actions = wr.set_watch_state(watchrule.WatchRule.NODATA)
- self.assertEqual([], actions)
-
- actions = wr.set_watch_state(watchrule.WatchRule.NORMAL)
- self.assertEqual([], actions)
-
- actions = wr.set_watch_state(watchrule.WatchRule.ALARM)
- self.assertEqual(['DummyAction'], actions)
- self.assertGreater(mock_get_resource.call_count, 0)
-
- def test_set_watch_state_invalid(self):
- # Setup
- rule = {'EvaluationPeriods': '1',
- 'MetricName': 'test_metric',
- 'AlarmActions': ['DummyAction'],
- 'Period': '300',
- 'Statistic': 'Maximum',
- 'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
- 'Threshold': '30'}
-
- now = timeutils.utcnow()
-
- last = now - datetime.timedelta(seconds=200)
- wr = watchrule.WatchRule(context=self.ctx,
- watch_name="testwatch",
- rule=rule,
- watch_data=[],
- stack_id=self.stack_id,
- last_evaluated=last)
-
- # Test
- self.assertRaises(ValueError, wr.set_watch_state, None)
- self.assertRaises(ValueError, wr.set_watch_state, "BADSTATE")
diff --git a/heat_integrationtests/README.rst b/heat_integrationtests/README.rst
index bf556c60a..ec4635a56 100644
--- a/heat_integrationtests/README.rst
+++ b/heat_integrationtests/README.rst
@@ -2,28 +2,24 @@
Heat integration tests
======================
-These tests can be run as a tempest plugin against any heat-enabled OpenStack
-cloud, however defaults match running against a recent DevStack.
+These tests can be run against any heat-enabled OpenStack cloud; however,
+the defaults match running against a recent DevStack.
-To run the tests against DevStack, do the following:
-
- # Define DEST
+To run the tests against DevStack, do the following::
export DEST=/opt/stack
- # create test resources and write tempest config
-
+ # create test resources and write config
$DEST/heat/heat_integrationtests/prepare_test_env.sh
-
$DEST/heat/heat_integrationtests/prepare_test_network.sh
- # run tempest selecting only these tests
-
- cd $DEST/tempest
-
- tempest run --regex heat_integrationtests
-
-If custom configuration is required, edit the [heat_plugin] section of
+ # run the heat integration tests
+ cd $DEST/heat
+ stestr --test-path=heat_integrationtests run
- $DEST/tempest/etc/tempest.conf
+If the Heat Tempest Plugin is also installed, the tests from that plugin
+will be run as well.
+If custom configuration is required, add it to the file
+``heat_integrationtests/heat_integrationtests.conf``. A sample configuration
+is available in ``heat_integrationtests/heat_integrationtests.conf.sample``.
diff --git a/heat_integrationtests/__init__.py b/heat_integrationtests/__init__.py
index e69de29bb..ec0913317 100644
--- a/heat_integrationtests/__init__.py
+++ b/heat_integrationtests/__init__.py
@@ -0,0 +1,61 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import unittest
+
+from heat_integrationtests.common import config
+
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__, project=__name__)
+
+
+def load_tests(loader, standard_tests, pattern):
+ logging.setup(config.init_conf(), __name__)
+
+ suite = unittest.TestSuite()
+
+ heat_integration_dir = os.path.dirname(os.path.abspath(__file__))
+ top_level_dir = os.path.split(heat_integration_dir)[0]
+ if pattern:
+ discovered = loader.discover(heat_integration_dir, pattern=pattern,
+ top_level_dir=top_level_dir)
+ else:
+ discovered = loader.discover(heat_integration_dir,
+ top_level_dir=top_level_dir)
+ suite.addTests(discovered)
+
+ # Discover tests from the heat-tempest-plugin if it is present, using
+ # the Tempest plugin mechanism so we don't need a hard dependency on it.
+ from tempest.test_discover import plugins as tempest_plugins
+
+ ext_plugins = tempest_plugins.TempestTestPluginManager()
+ plugin_data = ext_plugins.get_plugin_load_tests_tuple()
+ heat_plugin_data = plugin_data.get('heat')
+ if heat_plugin_data is not None:
+ plugin_dir, plugin_path = heat_plugin_data
+ LOG.info('Found Heat Tempest plugin: %s, %s', plugin_dir, plugin_path)
+ if pattern:
+ discovered = loader.discover(plugin_dir, pattern=pattern,
+ top_level_dir=plugin_path)
+ else:
+ discovered = loader.discover(plugin_dir,
+ top_level_dir=plugin_path)
+ suite.addTests(discovered)
+ else:
+ LOG.error('Heat Tempest plugin not found')
+ LOG.info('Available Tempest plugins: %s',
+ ', '.join(plugin_data.keys()))
+
+ return suite
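For reference, this uses the standard unittest load_tests protocol: when a runner discovers the heat_integrationtests package, it calls this function and uses the returned suite in place of the default discovery result, which is how the heat-tempest-plugin tests get folded in. A hypothetical invocation, assuming the package and its dependencies are importable:

import unittest

# Discovering the package triggers heat_integrationtests.load_tests, so
# the returned suite already includes any heat-tempest-plugin tests.
loader = unittest.TestLoader()
suite = loader.discover('heat_integrationtests')
print('%d tests discovered' % suite.countTestCases())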
diff --git a/heat_integrationtests/api/__init__.py b/heat_integrationtests/api/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/heat_integrationtests/api/__init__.py
+++ /dev/null
diff --git a/heat_integrationtests/api/gabbits/environments.yaml b/heat_integrationtests/api/gabbits/environments.yaml
deleted file mode 100644
index 17ac47692..000000000
--- a/heat_integrationtests/api/gabbits/environments.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-defaults:
- request_headers:
- X-Auth-Token: $ENVIRON['OS_TOKEN']
-
-tests:
-- name: environment with parameter
- POST: /stacks
- request_headers:
- content-type: application/json
- data:
- files: {}
- disable_rollback: true
- parameters: {}
- stack_name: $ENVIRON['PREFIX']-envstack
- environment:
- parameters:
- test_val: test
- template:
- heat_template_version: '2016-04-08'
- parameters:
- test_val:
- type: string
- resources:
- test:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: test_val}
- outputs:
- output_value:
- value: {get_attr: [test, output]}
-
- status: 201
- response_headers:
- location: //stacks/$ENVIRON['PREFIX']-envstack/[a-f0-9-]+/
-
-- name: poll for envstack CREATE_COMPLETE
- GET: $LOCATION
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: CREATE_COMPLETE
-
-- name: get stack output
- GET: $LAST_URL/outputs/output_value
- redirects: True
- status: 200
- response_json_paths:
- $.output.output_value: test
-
-- name: delete envstack
- DELETE: /stacks/$ENVIRON['PREFIX']-envstack
- redirects: True
- status: 204
diff --git a/heat_integrationtests/api/gabbits/resources.yaml b/heat_integrationtests/api/gabbits/resources.yaml
deleted file mode 100644
index 41da4448d..000000000
--- a/heat_integrationtests/api/gabbits/resources.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-defaults:
- request_headers:
- X-Auth-Token: $ENVIRON['OS_TOKEN']
-
-tests:
-- name: create stack with resources
- POST: /stacks
- request_headers:
- content-type: application/json
- data:
- files: {}
- disable_rollback: true
- parameters: {}
- stack_name: $ENVIRON['PREFIX']-rsrcstack
- template:
- heat_template_version: '2016-04-08'
- parameters:
- test_val:
- type: string
- default: test
- resources:
- test:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: test_val}
-
- status: 201
- response_headers:
- location: //stacks/$ENVIRON['PREFIX']-rsrcstack/[a-f0-9-]+/
-
-- name: poll for rsrcstack CREATE_COMPLETE
- GET: $LOCATION
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: CREATE_COMPLETE
-
-- name: list resources
- GET: $LAST_URL/resources
- request_headers:
- content-type: application/json
- status: 200
- response_json_paths:
- $.resources[0].logical_resource_id: test
- $.resources[0].resource_status: CREATE_COMPLETE
-
-- name: list filtered resources
- GET: $LAST_URL
- request_headers:
- content-type: application/json
- query_parameters:
- type: OS::Nova::Server
- status: 200
- response_json_paths:
- $.resources: []
-
-- name: show resource
- GET: $LAST_URL/test
- request_headers:
- content-type: application/json
- status: 200
- response_json_paths:
- $.resource.attributes.output: test
-
-- name: mark resource unhealthy
- PATCH: $LAST_URL
- request_headers:
- content-type: application/json
- data:
- mark_unhealthy: true
- resource_status_reason: 'resource deleted'
- status: 200
-
-- name: show unhealthy resource
- GET: $LAST_URL
- status: 200
- response_json_paths:
- $.resource.resource_status: CHECK_FAILED
- $.resource.resource_status_reason: 'resource deleted'
-
-- name: signal resource
- POST: $LAST_URL/signal
- status: 400
-
-- name: delete stack with resources
- DELETE: /stacks/$ENVIRON['PREFIX']-rsrcstack
- redirects: True
- status: 204
diff --git a/heat_integrationtests/api/gabbits/resourcetypes.yaml b/heat_integrationtests/api/gabbits/resourcetypes.yaml
deleted file mode 100644
index 0730cc8fc..000000000
--- a/heat_integrationtests/api/gabbits/resourcetypes.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-defaults:
- request_headers:
- X-Auth-Token: $ENVIRON['OS_TOKEN']
-
-tests:
-- name: list resource types
- GET: /resource_types
- status: 200
-
-- name: show resource type
- GET: /resource_types/OS::Heat::TestResource
- status: 200
- response_json_paths:
- $.support_status.status: SUPPORTED
- $.properties.wait_secs.default: 0
-
-- name: resource type template
- GET: /resource_types/OS::Heat::TestResource/template
- query_parameters:
- template_type: hot
- status: 200
- response_json_paths:
- $.resources.TestResource.type: OS::Heat::TestResource
- $.heat_template_version: '2016-10-14'
diff --git a/heat_integrationtests/api/gabbits/stacks.yaml b/heat_integrationtests/api/gabbits/stacks.yaml
deleted file mode 100644
index cb67e713a..000000000
--- a/heat_integrationtests/api/gabbits/stacks.yaml
+++ /dev/null
@@ -1,162 +0,0 @@
-defaults:
- request_headers:
- X-Auth-Token: $ENVIRON['OS_TOKEN']
-
-tests:
-- name: stack list
- GET: /stacks
- status: 200
- response_headers:
- content-type: application/json
-
-- name: create empty stack
- POST: /stacks
- request_headers:
- content-type: application/json
- data:
- files: {}
- disable_rollback: true
- parameters: {}
- stack_name: $ENVIRON['PREFIX']-empty
- environment: {}
- template:
- heat_template_version: '2016-04-08'
-
- status: 201
- response_headers:
- location: //stacks/$ENVIRON['PREFIX']-empty/[a-f0-9-]+/
-
-
-- name: poll for empty CREATE_COMPLETE
- GET: $LOCATION
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: CREATE_COMPLETE
-
-- name: show empty stack
- GET: $LAST_URL
- redirects: True
- status: 200
-
-- name: delete empty stack
- DELETE: $LAST_URL
- redirects: True
- status: 204
-
-- name: create stack
- POST: /stacks
- request_headers:
- content-type: application/json
- data:
- files: {}
- disable_rollback: true
- parameters: {'test_val': value}
- stack_name: $ENVIRON['PREFIX']-stack
- template:
- heat_template_version: pike
- parameters:
- test_val:
- type: string
- resources:
- test:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: test_val}
- outputs:
- output_value:
- value: {get_attr: [test, output]}
-
- status: 201
- response_headers:
- location: //stacks/$ENVIRON['PREFIX']-stack/[a-f0-9-]+/
-
-- name: poll for stack CREATE_COMPLETE
- GET: $LOCATION
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: CREATE_COMPLETE
-
-- name: show stack
- GET: $LAST_URL
- redirects: True
- status: 200
-
-- name: update stack
- PUT: $LAST_URL
- request_headers:
- content-type: application/json
- data:
- files: {}
- disable_rollback: true
- parameters: {'test_val': new_value}
- stack_name: $ENVIRON['PREFIX']-stack
- template:
- heat_template_version: pike
- parameters:
- test_val:
- type: string
- resources:
- test:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: test_val}
- action_wait_secs:
- update: 1
- outputs:
- output_value:
- value: {get_attr: [test, output]}
-
- status: 202
-
-- name: poll for stack UPDATE_COMPLETE
- GET: $LAST_URL
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: UPDATE_COMPLETE
-
-- name: patch update stack
- PATCH: $LAST_URL
- request_headers:
- content-type: application/json
- data:
- parameters: {'test_val': new_patched_value}
-
- status: 202
-
-- name: poll for stack patch UPDATE_COMPLETE
- GET: $LAST_URL
- redirects: True
- poll:
- count: 5
- delay: 1.0
- response_json_paths:
- $.stack.stack_status: UPDATE_COMPLETE
- $.stack.updated_time: /^(?!$HISTORY['poll for stack UPDATE_COMPLETE'].$RESPONSE['$.stack.updated_time'])/
-
-- name: list stack outputs
- GET: $LAST_URL/outputs
- redirects: True
- status: 200
- response_json_paths:
- $.outputs[0].output_key: output_value
-
-- name: get stack output
- GET: $LAST_URL/output_value
- redirects: True
- status: 200
- response_json_paths:
- $.output.output_value: new_patched_value
-
-- name: delete stack
- DELETE: /stacks/$ENVIRON['PREFIX']-stack
- redirects: True
- status: 204
diff --git a/heat_integrationtests/api/gabbits/templates.yaml b/heat_integrationtests/api/gabbits/templates.yaml
deleted file mode 100644
index 7b670540c..000000000
--- a/heat_integrationtests/api/gabbits/templates.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-defaults:
- request_headers:
- X-Auth-Token: $ENVIRON['OS_TOKEN']
-
-tests:
-- name: list template versions
- GET: /template_versions
- status: 200
- response_json_paths:
- $.template_versions[?(@.version='heat_template_version.2017-02-24')].type: hot
-
-- name: list template functions
- GET: /template_versions/heat_template_version.2016-10-14/functions
- status: 200
- response_json_paths:
- $.template_functions[?(@.functions='get_file')].description:
- A function for including a file inline.
-
-- name: template validate
- POST: /validate
- request_headers:
- content-type: application/json
- data:
- template:
- heat_template_version: '2016-04-08'
- parameters:
- test_val:
- type: string
- resources:
- test:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: test_val}
- outputs:
- output_value:
- value: {get_attr: [test, output]}
- status: 200
diff --git a/heat_integrationtests/api/test_heat_api.py b/heat_integrationtests/api/test_heat_api.py
deleted file mode 100644
index 2e219e722..000000000
--- a/heat_integrationtests/api/test_heat_api.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A test module to exercise the Heat API with gabbi. """
-
-import os
-
-from gabbi import driver
-from six.moves.urllib import parse as urlparse
-
-from heat_integrationtests.common import clients
-from heat_integrationtests.common import config
-from heat_integrationtests.common import test
-
-TESTS_DIR = 'gabbits'
-
-
-def load_tests(loader, tests, pattern):
- """Provide a TestSuite to the discovery process."""
- test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
-
- conf = config.CONF.heat_plugin
- if conf.auth_url is None:
- # It's not configured, let's not load tests
- return
- manager = clients.ClientManager(conf)
- endpoint = manager.identity_client.get_endpoint_url(
- 'orchestration', conf.region)
- host = urlparse.urlparse(endpoint).hostname
- os.environ['OS_TOKEN'] = manager.identity_client.auth_token
- os.environ['PREFIX'] = test.rand_name('api')
-
- return driver.build_tests(test_dir, loader, host=host,
- url=endpoint, test_loader_name=__name__)
diff --git a/heat_integrationtests/cleanup_test_env.sh b/heat_integrationtests/cleanup_test_env.sh
index a62e00339..9304eb541 100755
--- a/heat_integrationtests/cleanup_test_env.sh
+++ b/heat_integrationtests/cleanup_test_env.sh
@@ -30,4 +30,4 @@ openstack flavor delete m1.heat_int
openstack flavor delete m1.heat_micro
# delete the image created
-openstack image delete Fedora-Cloud-Base-26-1.5.x86_64
+openstack image delete Fedora-Cloud-Base-27-1.6.x86_64
diff --git a/heat_integrationtests/common/clients.py b/heat_integrationtests/common/clients.py
index 0f39f0362..7c8d1196b 100644
--- a/heat_integrationtests/common/clients.py
+++ b/heat_integrationtests/common/clients.py
@@ -13,7 +13,6 @@
import os
from cinderclient import client as cinder_client
-from gnocchiclient import client as gnocchi_client
from heat.common.i18n import _
from heatclient import client as heat_client
from keystoneauth1 import exceptions as kc_exceptions
@@ -65,7 +64,6 @@ class ClientManager(object):
CINDERCLIENT_VERSION = '2'
HEATCLIENT_VERSION = '1'
NOVA_API_VERSION = '2.1'
- GNOCCHI_VERSION = '1'
def __init__(self, conf, admin_credentials=False):
self.conf = conf
@@ -87,7 +85,6 @@ class ClientManager(object):
self.network_client = self._get_network_client()
self.volume_client = self._get_volume_client()
self.object_client = self._get_object_client()
- self.metric_client = self._get_metric_client()
def _username(self):
if self.admin_credentials:
@@ -186,14 +183,3 @@ class ClientManager(object):
'service_type': 'object-store'},
}
return swift_client.Connection(**args)
-
- def _get_metric_client(self):
-
- adapter_options = {'interface': 'public',
- 'region_name': self.conf.region}
- args = {
- 'session': self.identity_client.session,
- 'adapter_options': adapter_options
- }
- return gnocchi_client.Client(version=self.GNOCCHI_VERSION,
- **args)
diff --git a/heat_integrationtests/common/config.py b/heat_integrationtests/common/config.py
index eddac013b..8ec24d833 100644
--- a/heat_integrationtests/common/config.py
+++ b/heat_integrationtests/common/config.py
@@ -10,18 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
+import os
-CONF = None
+from oslo_config import cfg
+from oslo_log import log as logging
-service_available_group = cfg.OptGroup(name="service_available",
- title="Available OpenStack Services")
+import heat_integrationtests
-ServiceAvailableGroup = [
- cfg.BoolOpt("heat_plugin",
- default=True,
- help="Whether or not heat is expected to be available"),
-]
+_CONF = None
heat_group = cfg.OptGroup(name="heat_plugin",
title="Heat Service Options")
@@ -100,32 +96,9 @@ HeatGroup = [
cfg.StrOpt('floating_network_name',
default='public',
help="Visible floating network name "),
- cfg.StrOpt('boot_config_env',
- default=('heat_integrationtests/scenario/templates'
- '/boot_config_none_env.yaml'),
- help="Path to environment file which defines the "
- "resource type Heat::InstallConfigAgent. Needs to "
- "be appropriate for the image_ref."),
cfg.StrOpt('fixed_subnet_name',
default='heat-subnet',
help="Visible fixed sub-network name "),
- cfg.IntOpt('ssh_timeout',
- default=300,
- help="Timeout in seconds to wait for authentication to "
- "succeed."),
- cfg.IntOpt('ip_version_for_ssh',
- default=4,
- help="IP version used for SSH connections."),
- cfg.IntOpt('ssh_channel_timeout',
- default=60,
- help="Timeout in seconds to wait for output from ssh "
- "channel."),
- cfg.IntOpt('tenant_network_mask_bits',
- default=28,
- help="The mask bits for tenant ipv4 subnets"),
- cfg.BoolOpt('skip_scenario_tests',
- default=False,
- help="Skip all scenario tests"),
cfg.BoolOpt('skip_functional_tests',
default=False,
help="Skip all functional tests"),
@@ -133,10 +106,6 @@ HeatGroup = [
help="List of functional test class or class.method "
"names to skip ex. AutoscalingGroupTest, "
"InstanceGroupBasicTest.test_size_updates_work"),
- cfg.ListOpt('skip_scenario_test_list',
- help="List of scenario test class or class.method "
- "names to skip ex. NeutronLoadBalancerTest, "
- "AodhAlarmTest.test_alarm"),
cfg.ListOpt('skip_test_stack_action_list',
help="List of stack actions in tests to skip "
"ex. ABANDON, ADOPT, SUSPEND, RESUME"),
@@ -144,29 +113,35 @@ HeatGroup = [
default=True,
help="Test features that are only present for stacks with "
"convergence enabled."),
- cfg.IntOpt('volume_size',
- default=1,
- help='Default size in GB for volumes created by volumes tests'),
cfg.IntOpt('connectivity_timeout',
default=120,
help="Timeout in seconds to wait for connectivity to "
"server."),
- cfg.IntOpt('sighup_timeout',
- default=120,
- help="Timeout in seconds to wait for adding or removing child "
- "process after receiving of sighup signal"),
- cfg.IntOpt('sighup_config_edit_retries',
- default=10,
- help='Count of retries to edit config file during sighup. If '
- 'another worker already edit config file, file can be '
- 'busy, so need to wait and try edit file again.'),
- cfg.StrOpt('heat_config_notify_script',
- default=('heat-config-notify'),
- help="Path to the script heat-config-notify"),
-
]
+def init_conf(read_conf=True):
+ global _CONF
+ if _CONF is not None:
+ return _CONF
+
+ default_config_files = None
+ if read_conf:
+ confpath = os.path.join(
+ os.path.dirname(os.path.realpath(heat_integrationtests.__file__)),
+ 'heat_integrationtests.conf')
+ if os.path.isfile(confpath):
+ default_config_files = [confpath]
+
+ _CONF = cfg.ConfigOpts()
+ logging.register_options(_CONF)
+ _CONF(args=[], project='heat_integrationtests',
+ default_config_files=default_config_files)
+
+ for group, opts in list_opts():
+ _CONF.register_opts(opts, group=group)
+ return _CONF
+
+
def list_opts():
yield heat_group.name, HeatGroup
- yield service_available_group.name, ServiceAvailableGroup
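init_conf is a lazy module-level singleton: the first call builds a ConfigOpts, optionally reading heat_integrationtests.conf from the package directory, registers every option group from list_opts, and caches the result; later calls return the same object. A small usage sketch, assuming the tree is importable:

from heat_integrationtests.common import config

conf = config.init_conf()
assert conf is config.init_conf()  # second call returns the cached object
print(conf.heat_plugin.floating_network_name)  # 'public' unless overridden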
diff --git a/heat_integrationtests/common/remote_client.py b/heat_integrationtests/common/remote_client.py
deleted file mode 100644
index 201b10f4e..000000000
--- a/heat_integrationtests/common/remote_client.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import select
-import socket
-import time
-
-from oslo_log import log as logging
-import paramiko
-import six
-
-from heat_integrationtests.common import exceptions
-
-LOG = logging.getLogger(__name__)
-
-
-class Client(object):
-
- def __init__(self, host, username, password=None, timeout=300, pkey=None,
- channel_timeout=10, look_for_keys=False, key_filename=None):
- self.host = host
- self.username = username
- self.password = password
- if isinstance(pkey, six.string_types):
- pkey = paramiko.RSAKey.from_private_key(
- six.moves.cStringIO(str(pkey)))
- self.pkey = pkey
- self.look_for_keys = look_for_keys
- self.key_filename = key_filename
- self.timeout = int(timeout)
- self.channel_timeout = float(channel_timeout)
- self.buf_size = 1024
-
- def _get_ssh_connection(self, sleep=1.5, backoff=1):
- """Returns an ssh connection to the specified host."""
- bsleep = sleep
- ssh = paramiko.SSHClient()
- ssh.set_missing_host_key_policy(
- paramiko.AutoAddPolicy())
- _start_time = time.time()
- if self.pkey is not None:
- LOG.info("Creating ssh connection to '%s' as '%s'"
- " with public key authentication",
- self.host, self.username)
- else:
- LOG.info("Creating ssh connection to '%s' as '%s'"
- " with password %s",
- self.host, self.username, str(self.password))
- attempts = 0
- while True:
- try:
- ssh.connect(self.host, username=self.username,
- password=self.password,
- look_for_keys=self.look_for_keys,
- key_filename=self.key_filename,
- timeout=self.channel_timeout, pkey=self.pkey)
- LOG.info("ssh connection to %s@%s successfuly created",
- self.username, self.host)
- return ssh
- except (socket.error,
- paramiko.SSHException) as e:
- if self._is_timed_out(_start_time):
- LOG.exception("Failed to establish authenticated ssh"
- " connection to %s@%s after %d attempts",
- self.username, self.host, attempts)
- raise exceptions.SSHTimeout(host=self.host,
- user=self.username,
- password=self.password)
- bsleep += backoff
- attempts += 1
- LOG.warning("Failed to establish authenticated ssh"
- " connection to %s@%s (%s). Number attempts: %s."
- " Retry after %d seconds.",
- self.username, self.host, e, attempts, bsleep)
- time.sleep(bsleep)
-
- def _is_timed_out(self, start_time):
- return (time.time() - self.timeout) > start_time
-
- def exec_command(self, cmd):
- """Execute the specified command on the server.
-
- Note that this method reads the whole command output into memory and
- thus shouldn't be used for large outputs.
-
- :returns: data read from standard output of the command.
- :raises: SSHExecCommandFailed if the command returns a nonzero
- status. The exception contains the exit status and stderr content.
- """
- ssh = self._get_ssh_connection()
- transport = ssh.get_transport()
- channel = transport.open_session()
- channel.fileno() # Register event pipe
- channel.exec_command(cmd)
- channel.shutdown_write()
- out_data = []
- err_data = []
- poll = select.poll()
- poll.register(channel, select.POLLIN)
- start_time = time.time()
-
- while True:
- ready = poll.poll(self.channel_timeout)
- if not any(ready):
- if not self._is_timed_out(start_time):
- continue
- raise exceptions.TimeoutException(
- "Command: '{0}' executed on host '{1}'.".format(
- cmd, self.host))
- if not ready[0]: # If there is nothing to read.
- continue
- out_chunk = err_chunk = None
- if channel.recv_ready():
- out_chunk = channel.recv(self.buf_size)
- out_data += out_chunk,
- if channel.recv_stderr_ready():
- err_chunk = channel.recv_stderr(self.buf_size)
- err_data += err_chunk,
- if channel.closed and not err_chunk and not out_chunk:
- break
- exit_status = channel.recv_exit_status()
- if 0 != exit_status:
- raise exceptions.SSHExecCommandFailed(
- command=cmd, exit_status=exit_status,
- strerror=''.join(err_data))
- return ''.join(out_data)
-
- def test_connection_auth(self):
- """Raises an exception when we can not connect to server via ssh."""
- connection = self._get_ssh_connection()
- connection.close()
-
-
-class RemoteClient(object):
-
- # NOTE(afazekas): It should always get an address instead of a server object
- def __init__(self, server, username, password=None, pkey=None,
- conf=None):
- self.conf = conf
- ssh_timeout = self.conf.ssh_timeout
- network = self.conf.network_for_ssh
- ip_version = self.conf.ip_version_for_ssh
- ssh_channel_timeout = self.conf.ssh_channel_timeout
- if isinstance(server, six.string_types):
- ip_address = server
- else:
- addresses = server['addresses'][network]
- for address in addresses:
- if address['version'] == ip_version:
- ip_address = address['addr']
- break
- else:
- raise exceptions.ServerUnreachable()
- self.ssh_client = Client(ip_address, username, password,
- ssh_timeout, pkey=pkey,
- channel_timeout=ssh_channel_timeout)
-
- def exec_command(self, cmd):
- return self.ssh_client.exec_command(cmd)
-
- def validate_authentication(self):
- """Validate ssh connection and authentication.
-
- This method raises an Exception when the validation fails.
- """
- self.ssh_client.test_connection_auth()
-
- def get_partitions(self):
- # Return the contents of /proc/partitions
- command = 'cat /proc/partitions'
- output = self.exec_command(command)
- return output
-
- def get_boot_time(self):
- cmd = 'cut -f1 -d. /proc/uptime'
- boot_secs = self.exec_command(cmd)
- boot_time = time.time() - int(boot_secs)
- return time.localtime(boot_time)
-
- def write_to_console(self, message):
- message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
- # usually to /dev/ttyS0
- cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
- return self.exec_command(cmd)
-
- def ping_host(self, host):
- cmd = 'ping -c1 -w1 %s' % host
- return self.exec_command(cmd)
-
- def get_ip_list(self):
- cmd = "/bin/ip address"
- return self.exec_command(cmd)
diff --git a/heat_integrationtests/common/test.py b/heat_integrationtests/common/test.py
index 1abd579df..86d7d0a55 100644
--- a/heat_integrationtests/common/test.py
+++ b/heat_integrationtests/common/test.py
@@ -19,7 +19,6 @@ import time
import fixtures
from heatclient import exc as heat_exceptions
from keystoneauth1 import exceptions as kc_exceptions
-from neutronclient.common import exceptions as network_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import six
@@ -30,7 +29,6 @@ import testtools
from heat_integrationtests.common import clients
from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
-from heat_integrationtests.common import remote_client
LOG = logging.getLogger(__name__)
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
@@ -73,7 +71,8 @@ def requires_convergence(test_method):
The decorated test will be skipped when convergence is disabled.
'''
- convergence_enabled = config.CONF.heat_plugin.convergence_engine_enabled
+ convergence_enabled = config.init_conf(
+ ).heat_plugin.convergence_engine_enabled
skipper = testtools.skipUnless(convergence_enabled,
"Convergence-only tests are disabled")
return skipper(test_method)
@@ -85,7 +84,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def setUp(self):
super(HeatIntegrationTest, self).setUp()
- self.conf = config.CONF.heat_plugin
+ self.conf = config.init_conf().heat_plugin
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
@@ -109,32 +108,12 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
self.network_client = self.manager.network_client
self.volume_client = self.manager.volume_client
self.object_client = self.manager.object_client
- self.metric_client = self.manager.metric_client
self.client = self.orchestration_client
def setup_clients_for_admin(self):
self.setup_clients(self.conf, True)
- def get_remote_client(self, server_or_ip, username, private_key=None):
- if isinstance(server_or_ip, six.string_types):
- ip = server_or_ip
- else:
- network_name_for_ssh = self.conf.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- if private_key is None:
- private_key = self.keypair.private_key
- linux_client = remote_client.RemoteClient(ip, username,
- pkey=private_key,
- conf=self.conf)
- try:
- linux_client.validate_authentication()
- except exceptions.SSHTimeout:
- LOG.exception('ssh connection to %s failed', ip)
- raise
-
- return linux_client
-
def check_connectivity(self, check_ip):
def try_connect(ip):
try:
@@ -199,13 +178,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
if net['name'] == net_name:
return net
- def is_network_extension_supported(self, extension_alias):
- try:
- self.network_client.show_extension(extension_alias)
- except network_exceptions.NeutronClientException:
- return False
- return True
-
def is_service_available(self, service_type):
try:
self.identity_client.get_endpoint_url(
@@ -366,6 +338,11 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
+ elif not any(s in status for s in ['CREATE', 'ADOPT']):
+ # Raise when the stack is not found, unless the action is
+ # CREATE or ADOPT (the only two cases in which the stack
+ # may legitimately not exist yet).
+ raise
# ignore this, as the resource may not have
# been created yet
else:
@@ -543,9 +520,10 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
return self.list_resources(nested_identifier)
return self.client.resources.list(nested_identifier)
- def list_resources(self, stack_identifier):
+ def list_resources(self, stack_identifier, filter_func=None):
resources = self.client.resources.list(stack_identifier)
- return dict((r.resource_name, r.resource_type) for r in resources)
+ return dict((r.resource_name, r.resource_type) for r in resources
+ if (filter_func(r) if callable(filter_func) else True))
def get_resource_stack_id(self, r):
stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
@@ -705,13 +683,13 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
time.sleep(build_interval)
def check_autoscale_complete(self, stack_id, expected_num, parent_stack,
- policy):
+ group_name):
res_list = self.client.resources.list(stack_id)
all_res_complete = all(res.resource_status in ('UPDATE_COMPLETE',
'CREATE_COMPLETE')
for res in res_list)
all_res = len(res_list) == expected_num
if all_res and all_res_complete:
- metadata = self.client.resources.metadata(parent_stack, policy)
+ metadata = self.client.resources.metadata(parent_stack, group_name)
return not metadata.get('scaling_in_progress')
return False
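The new filter_func argument to list_resources() is optional: the conditional inside the dict comprehension only consults the filter when it is callable, so passing nothing keeps every resource. The same pattern in isolation (names are illustrative):

def filter_items(items, filter_func=None):
    # Apply filter_func only when one was supplied; otherwise keep everything.
    return [i for i in items
            if (filter_func(i) if callable(filter_func) else True)]

assert filter_items([1, 2, 3]) == [1, 2, 3]
assert filter_items([1, 2, 3], lambda i: i > 1) == [2, 3]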
diff --git a/heat_integrationtests/config-generator.conf b/heat_integrationtests/config-generator.conf
new file mode 100644
index 000000000..4c92c4851
--- /dev/null
+++ b/heat_integrationtests/config-generator.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+output_file = heat_integrationtests/heat_integrationtests.conf.sample
+wrap_width = 79
+namespace = heat_integrationtests.common.config
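This generator config is consumed by oslo.config's sample-config tooling: running oslo-config-generator --config-file heat_integrationtests/config-generator.conf regenerates the heat_integrationtests.conf.sample file named in output_file, collecting the options exposed through the heat_integrationtests.common.config namespace listed above.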
diff --git a/heat_integrationtests/functional/test_conditions.py b/heat_integrationtests/functional/test_conditions.py
index ebb3c0829..9feb7cd2c 100644
--- a/heat_integrationtests/functional/test_conditions.py
+++ b/heat_integrationtests/functional/test_conditions.py
@@ -309,7 +309,8 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
def res_assert_for_prod(self, resources, bj_prod=True, fj_zone=False,
shannxi_provice=False):
- res_names = [res.resource_name for res in resources]
+ res_names = {res.resource_name for res in resources
+ if res.resource_status != 'DELETE_COMPLETE'}
if bj_prod:
self.assertEqual(4, len(resources))
self.assertIn('beijing_prod_res', res_names)
@@ -331,7 +332,8 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
def res_assert_for_test(self, resources, fj_zone=False,
shannxi_provice=False):
- res_names = [res.resource_name for res in resources]
+ res_names = {res.resource_name for res in resources
+ if res.resource_status != 'DELETE_COMPLETE'}
if fj_zone:
self.assertEqual(4, len(resources))
diff --git a/heat_integrationtests/functional/test_create_update.py b/heat_integrationtests/functional/test_create_update.py
index 3d4ca7648..cb5779aab 100644
--- a/heat_integrationtests/functional/test_create_update.py
+++ b/heat_integrationtests/functional/test_create_update.py
@@ -620,6 +620,8 @@ resources:
# Fixing the template should fix the stack
template = _change_rsrc_properties(template,
['test1'], {'fail': False})
+ template['resources']['test2'][
+ 'properties'] = {'action_wait_secs': {'update': 1}}
self.update_stack(stack_identifier,
template=template,
environment=env)
@@ -649,6 +651,8 @@ resources:
template = _change_rsrc_properties(template,
['test2'], {'value': 'Test2'})
+ template['resources']['test1'][
+ 'properties']['action_wait_secs'] = {'create': 1}
self.update_stack(stack_identifier,
template=template,
expected_status='UPDATE_FAILED')
@@ -701,7 +705,11 @@ resources:
expected_status='UPDATE_IN_PROGRESS')
def check_resources():
- resources = self.list_resources(stack_identifier)
+ def is_complete(r):
+ return r.resource_status in {'CREATE_COMPLETE',
+ 'UPDATE_COMPLETE'}
+
+ resources = self.list_resources(stack_identifier, is_complete)
if len(resources) < 2:
return False
self.assertIn('test3', resources)
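A note on the action_wait_secs additions in the two earlier hunks: OS::Heat::TestResource accepts an action_wait_secs map that delays the named action, so the one-second create/update waits are presumably there to keep the stack observably IN_PROGRESS while the tests poll; the is_complete filter in the final hunk then uses the new filter_func parameter of list_resources() to count only resources that have finished.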
diff --git a/heat_integrationtests/functional/test_create_update_neutron_port.py b/heat_integrationtests/functional/test_create_update_neutron_port.py
deleted file mode 100644
index bd39bf4cc..000000000
--- a/heat_integrationtests/functional/test_create_update_neutron_port.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-test_template = '''
-heat_template_version: 2015-04-30
-description: Test template to create a port with ip_address.
-parameters:
- mac:
- type: string
- default: 00-00-00-00-BB-BB
-resources:
- net:
- type: OS::Neutron::Net
- subnet:
- type: OS::Neutron::Subnet
- properties:
- enable_dhcp: false
- network: { get_resource: net }
- cidr: 11.11.11.0/24
- port:
- type: OS::Neutron::Port
- properties:
- network: {get_resource: net}
- mac_address: {get_param: mac}
- fixed_ips:
- - subnet: {get_resource: subnet}
- ip_address: 11.11.11.11
- test:
- depends_on: port
- type: OS::Heat::TestResource
- properties:
- value: Test1
- fail: False
-outputs:
- port_ip:
- value: {get_attr: [port, fixed_ips, 0, ip_address]}
- mac_address:
- value: {get_attr: [port, mac_address]}
-'''
-
-
-class UpdatePortTest(functional_base.FunctionalTestsBase):
-
- def get_port_id_and_outputs(self, stack_identifier):
- resources = self.client.resources.list(stack_identifier)
- port_id = [res.physical_resource_id for res in resources
- if res.resource_name == 'port']
- stack = self.client.stacks.get(stack_identifier)
- port_ip = self._stack_output(stack, 'port_ip')
- port_mac = self._stack_output(stack, 'mac_address')
- return port_id[0], port_ip, port_mac
-
- def test_update_remove_ip(self):
- # create with defined ip_address
- stack_identifier = self.stack_create(template=test_template)
- _id, _ip, _mac = self.get_port_id_and_outputs(stack_identifier)
-
- # remove ip_address property and update stack
- templ_no_ip = test_template.replace('ip_address: 11.11.11.11', '')
- self.update_stack(stack_identifier, templ_no_ip)
-
- new_id, new_ip, new_mac = self.get_port_id_and_outputs(
- stack_identifier)
- # port should be updated with the same id
- self.assertEqual(_id, new_id)
- self.assertEqual(_mac, new_mac)
-
- def test_update_with_mac_address(self):
- if not self.conf.admin_username or not self.conf.admin_password:
- self.skipTest('No admin creds found, skipping')
-
- # Setup admin clients for updating mac_address
- self.setup_clients_for_admin()
-
- # Create with default mac_address and defined ip_address
- stack_identifier = self.stack_create(template=test_template)
- _id, _ip, _mac = self.get_port_id_and_outputs(stack_identifier)
-
- # Update with another 'mac' parameter
- parameters = {'mac': '00-00-00-00-AA-AA'}
- self.update_stack(stack_identifier, test_template,
- parameters=parameters)
-
- new_id, new_ip, new_mac = self.get_port_id_and_outputs(
- stack_identifier)
- # mac_address should be different
- self.assertEqual(_id, new_id)
- self.assertEqual(_ip, new_ip)
- self.assertNotEqual(_mac, new_mac)
diff --git a/heat_integrationtests/functional/test_create_update_neutron_subnet.py b/heat_integrationtests/functional/test_create_update_neutron_subnet.py
deleted file mode 100644
index 31ad6f5ba..000000000
--- a/heat_integrationtests/functional/test_create_update_neutron_subnet.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-test_template = '''
-heat_template_version: 2015-04-30
-description: Test template to create/update subnet with allocation_pools.
-resources:
- net:
- type: OS::Neutron::Net
- subnet:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: net }
- cidr: 11.11.11.0/24
- gateway_ip: 11.11.11.5
- allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]
-outputs:
- alloc_pools:
- value: {get_attr: [subnet, allocation_pools]}
- gateway_ip:
- value: {get_attr: [subnet, gateway_ip]}
-'''
-
-
-class UpdateSubnetTest(functional_base.FunctionalTestsBase):
-
- def get_outputs(self, stack_identifier, output_key):
- stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, output_key)
- return output
-
- def test_update_allocation_pools(self):
- stack_identifier = self.stack_create(template=test_template)
- alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
- alloc_pools)
-
- # Update allocation_pools with a new range
- templ_other_pool = test_template.replace(
- 'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
- 'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.100}]')
- self.update_stack(stack_identifier, templ_other_pool)
- new_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- # the new pools should be the new range
- self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.100'}],
- new_alloc_pools)
-
- def test_update_allocation_pools_to_empty(self):
- stack_identifier = self.stack_create(template=test_template)
- alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
- alloc_pools)
-
- # Update allocation_pools with []
- templ_empty_pools = test_template.replace(
- 'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
- 'allocation_pools: []')
- self.update_stack(stack_identifier, templ_empty_pools)
- new_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- # new_alloc_pools should be []
- self.assertEqual([], new_alloc_pools)
-
- def test_update_to_no_allocation_pools(self):
- stack_identifier = self.stack_create(template=test_template)
- alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- self.assertEqual([{'start': '11.11.11.10', 'end': '11.11.11.250'}],
- alloc_pools)
-
- # Remove the allocation_pools from template
- templ_no_pools = test_template.replace(
- 'allocation_pools: [{start: 11.11.11.10, end: 11.11.11.250}]',
- '')
- self.update_stack(stack_identifier, templ_no_pools)
- last_alloc_pools = self.get_outputs(stack_identifier, 'alloc_pools')
- # last_alloc_pools should be []
- self.assertEqual([], last_alloc_pools)
-
- def test_update_gateway_ip(self):
- stack_identifier = self.stack_create(template=test_template)
- gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- self.assertEqual('11.11.11.5', gw_ip)
-
- # Update gateway_ip
- templ_other_gw_ip = test_template.replace(
- 'gateway_ip: 11.11.11.5', 'gateway_ip: 11.11.11.9')
- self.update_stack(stack_identifier, templ_other_gw_ip)
- new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- # the gateway_ip should be the new one
- self.assertEqual('11.11.11.9', new_gw_ip)
-
- def test_update_gateway_ip_to_empty(self):
- stack_identifier = self.stack_create(template=test_template)
- gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- self.assertEqual('11.11.11.5', gw_ip)
-
- # Update gateway_ip to null(resolve to '')
- templ_empty_gw_ip = test_template.replace(
- 'gateway_ip: 11.11.11.5', 'gateway_ip: null')
- self.update_stack(stack_identifier, templ_empty_gw_ip)
- new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- # new gateway_ip should be None
- self.assertIsNone(new_gw_ip)
-
- def test_update_to_no_gateway_ip(self):
- stack_identifier = self.stack_create(template=test_template)
- gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- self.assertEqual('11.11.11.5', gw_ip)
-
- # Remove the gateway from template
- templ_no_gw_ip = test_template.replace(
- 'gateway_ip: 11.11.11.5', '')
- self.update_stack(stack_identifier, templ_no_gw_ip)
- new_gw_ip = self.get_outputs(stack_identifier, 'gateway_ip')
- # new gateway_ip should be None
- self.assertIsNone(new_gw_ip)
diff --git a/heat_integrationtests/functional/test_create_update_neutron_trunk.py b/heat_integrationtests/functional/test_create_update_neutron_trunk.py
deleted file mode 100644
index b5a108ad0..000000000
--- a/heat_integrationtests/functional/test_create_update_neutron_trunk.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright (c) 2017 Ericsson.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import yaml
-
-from heat_integrationtests.functional import functional_base
-
-
-test_template = '''
-heat_template_version: pike
-description: Test template to create, update, delete trunk.
-resources:
- parent_net:
- type: OS::Neutron::Net
- trunk_net_one:
- type: OS::Neutron::Net
- trunk_net_two:
- type: OS::Neutron::Net
- parent_subnet:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: parent_net }
- cidr: 10.0.0.0/16
- trunk_subnet_one:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: trunk_net_one }
- cidr: 10.10.0.0/16
- trunk_subnet_two:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: trunk_net_two }
- cidr: 10.20.0.0/16
- parent_port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: parent_net }
- name: trunk_parent_port
- sub_port_one:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: trunk_net_one }
- name: trunk_sub_port_one
- sub_port_two:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: trunk_net_two }
- name: trunk_sub_port_two
- trunk:
- type: OS::Neutron::Trunk
- properties:
- name: test_trunk
- port: { get_resource: parent_port }
- sub_ports:
-outputs:
- trunk_parent_port:
- value: { get_attr: [trunk, port_id] }
-'''
-
-
-class UpdateTrunkTest(functional_base.FunctionalTestsBase):
-
- @staticmethod
- def _sub_ports_dict_to_set(sub_ports):
- new_sub_ports = copy.deepcopy(sub_ports)
-
- # NOTE(lajos katona): In the template we have to give the sub port as
- # port, but trunk_details returns them with port_id. On top of that,
- # trunk_details contains the mac_address as well, which is useless
- # here. So we have to make sure that the dictionaries (input from
- # template or output from trunk_details) have the same keys:
- if any('mac_address' in d for d in new_sub_ports):
- for sp in new_sub_ports:
- sp['port'] = sp['port_id']
- del sp['port_id']
- del sp['mac_address']
-
- # NOTE(lajos katona): We receive lists (trunk_details['sub_ports'] and
- # the input to the template) and we can't be sure that the order is the
- # same, so by using sets we can compare them.
- sub_ports_set = {frozenset(d.items()) for d in new_sub_ports}
- return sub_ports_set
-
- def test_add_first_sub_port(self):
- stack_identifier = self.stack_create(template=test_template)
-
- parsed_template = yaml.safe_load(test_template)
- new_sub_port = [{'port': {'get_resource': 'sub_port_one'},
- 'segmentation_id': 10,
- 'segmentation_type': 'vlan'}]
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'] = new_sub_port
- updated_template = yaml.safe_dump(parsed_template)
- self.update_stack(stack_identifier, updated_template)
-
- # Fix the port_id in the template for assertion
- new_sub_port[0]['port'] = self.get_physical_resource_id(
- stack_identifier, 'sub_port_one')
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_sub_port = parent_port['trunk_details']['sub_ports']
-
- self.assertEqual(self._sub_ports_dict_to_set(new_sub_port),
- self._sub_ports_dict_to_set(trunk_sub_port))
-
- def test_add_a_second_sub_port(self):
- parsed_template = yaml.safe_load(test_template)
- sub_ports = [{'port': {'get_resource': 'sub_port_one'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 10}, ]
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'] = sub_ports
- template_with_sub_ports = yaml.safe_dump(parsed_template)
-
- stack_identifier = self.stack_create(template=template_with_sub_ports)
-
- new_sub_port = {'port': {'get_resource': 'sub_port_two'},
- 'segmentation_id': 20,
- 'segmentation_type': 'vlan'}
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'].append(new_sub_port)
-
- updated_template = yaml.safe_dump(parsed_template)
-
- self.update_stack(stack_identifier, updated_template)
-
- # Fix the port_ids in the templates for assertion
- sub_ports[0]['port'] = self.get_physical_resource_id(
- stack_identifier, 'sub_port_one')
- new_sub_port['port'] = self.get_physical_resource_id(
- stack_identifier, 'sub_port_two')
- expected_sub_ports = [sub_ports[0], new_sub_port]
-
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_sub_ports = parent_port['trunk_details']['sub_ports']
-
- self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
- self._sub_ports_dict_to_set(trunk_sub_ports))
-
- def test_remove_sub_port_from_trunk(self):
- sub_ports = [{'port': {'get_resource': 'sub_port_one'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 10},
- {'port': {'get_resource': 'sub_port_two'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 20}]
- parsed_template = yaml.safe_load(test_template)
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'] = sub_ports
- template_with_sub_ports = yaml.safe_dump(parsed_template)
-
- stack_identifier = self.stack_create(template=template_with_sub_ports)
-
- sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_two'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 20}
- parsed_template['resources']['trunk'][
- 'properties']['sub_ports'].remove(sub_port_to_be_removed)
- updated_template = yaml.safe_dump(parsed_template)
-
- self.update_stack(stack_identifier, updated_template)
-
- # Fix the port_ids in the templates for assertion
- sub_ports[0]['port'] = self.get_physical_resource_id(
- stack_identifier, 'sub_port_one')
- expected_sub_ports = [sub_ports[0]]
-
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_sub_ports = parent_port['trunk_details']['sub_ports']
-
- self.assertEqual(self._sub_ports_dict_to_set(expected_sub_ports),
- self._sub_ports_dict_to_set(trunk_sub_ports))
-
- def test_remove_last_sub_port_from_trunk(self):
- sub_ports = [{'port': {'get_resource': 'sub_port_one'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 10}]
- parsed_template = yaml.safe_load(test_template)
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'] = sub_ports
-
- template_with_sub_ports = yaml.safe_dump(parsed_template)
- stack_identifier = self.stack_create(template=template_with_sub_ports)
-
- sub_port_to_be_removed = {'port': {'get_resource': 'sub_port_one'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 10}
-
- parsed_template['resources']['trunk'][
- 'properties']['sub_ports'] = []
- updated_template = yaml.safe_dump(parsed_template)
-
- self.update_stack(stack_identifier, updated_template)
-
- sub_port_to_be_removed['port'] = self.get_physical_resource_id(
- stack_identifier, 'sub_port_one')
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_sub_ports = parent_port['trunk_details']['sub_ports']
-
- self.assertNotEqual(
- self._sub_ports_dict_to_set([sub_port_to_be_removed]),
- self._sub_ports_dict_to_set(trunk_sub_ports))
- self.assertFalse(trunk_sub_ports,
- 'The returned sub ports (%s) in trunk_details is '
- 'not empty!' % trunk_sub_ports)
-
- def test_update_existing_sub_port_on_trunk(self):
- sub_ports = [{'port': {'get_resource': 'sub_port_one'},
- 'segmentation_type': 'vlan',
- 'segmentation_id': 10}]
- parsed_template = yaml.safe_load(test_template)
- parsed_template['resources']['trunk']['properties'][
- 'sub_ports'] = sub_ports
-
- template_with_sub_ports = yaml.safe_dump(parsed_template)
- stack_identifier = self.stack_create(template=template_with_sub_ports)
-
- sub_port_id = self.get_physical_resource_id(
- stack_identifier, 'sub_port_one')
- parsed_template['resources']['trunk']['properties']['sub_ports'][0][
- 'segmentation_id'] = 99
- updated_template = yaml.safe_dump(parsed_template)
-
- self.update_stack(stack_identifier, updated_template)
- updated_sub_port = {'port': sub_port_id,
- 'segmentation_type': 'vlan',
- 'segmentation_id': 99}
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_sub_ports = parent_port['trunk_details']['sub_ports']
-
- self.assertEqual(self._sub_ports_dict_to_set([updated_sub_port]),
- self._sub_ports_dict_to_set(trunk_sub_ports))
-
- def test_update_trunk_name_and_description(self):
- new_name = 'pineapple'
- new_description = 'This is a test trunk'
-
- stack_identifier = self.stack_create(template=test_template)
- parsed_template = yaml.safe_load(test_template)
- parsed_template['resources']['trunk']['properties']['name'] = new_name
- parsed_template['resources']['trunk']['properties'][
- 'description'] = new_description
- updated_template = yaml.safe_dump(parsed_template)
- self.update_stack(stack_identifier, template=updated_template)
-
- parent_id = self.get_stack_output(
- stack_identifier, 'trunk_parent_port')
- parent_port = self.network_client.show_port(parent_id)['port']
- trunk_id = parent_port['trunk_details']['trunk_id']
-
- trunk = self.network_client.show_trunk(trunk_id)['trunk']
- self.assertEqual(new_name, trunk['name'])
- self.assertEqual(new_description, trunk['description'])
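The removed _sub_ports_dict_to_set() helper compares lists of flat dicts without regard to order by converting each dict's items to a hashable frozenset. The technique in isolation (values here are made up):

a = [{'port': 'p1', 'segmentation_id': 10}, {'port': 'p2', 'segmentation_id': 20}]
b = [{'port': 'p2', 'segmentation_id': 20}, {'port': 'p1', 'segmentation_id': 10}]

# Frozensets of the items are hashable, so order-insensitive comparison
# reduces to set equality.
assert {frozenset(d.items()) for d in a} == {frozenset(d.items()) for d in b}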
diff --git a/heat_integrationtests/functional/test_encrypted_parameter.py b/heat_integrationtests/functional/test_encrypted_parameter.py
deleted file mode 100644
index 5ec8a27e2..000000000
--- a/heat_integrationtests/functional/test_encrypted_parameter.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class EncryptedParametersTest(functional_base.FunctionalTestsBase):
-
- template = '''
-heat_template_version: 2014-10-16
-parameters:
- image:
- type: string
- flavor:
- type: string
- network:
- type: string
- foo:
- type: string
- description: 'parameter with encryption turned on'
- hidden: true
- default: secret
-resources:
- server_with_encrypted_property:
- type: OS::Nova::Server
- properties:
- name: { get_param: foo }
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks: [{network: {get_param: network} }]
-outputs:
- encrypted_foo_param:
- description: 'encrypted param'
- value: { get_param: foo }
-'''
-
- def test_db_encryption(self):
- # Create a stack with the value of 'foo' to be encrypted
- foo_param = 'my_encrypted_foo'
- parameters = {
- "image": self.conf.minimal_image_ref,
- "flavor": self.conf.minimal_instance_type,
- 'network': self.conf.fixed_network_name,
- "foo": foo_param
- }
-
- stack_identifier = self.stack_create(
- template=self.template,
- parameters=parameters
- )
- stack = self.client.stacks.get(stack_identifier)
-
- # Verify the output value for 'foo' parameter
- for out in stack.outputs:
- if out['output_key'] == 'encrypted_foo_param':
- self.assertEqual(foo_param, out['output_value'])
diff --git a/heat_integrationtests/functional/test_encryption_vol_type.py b/heat_integrationtests/functional/test_encryption_vol_type.py
deleted file mode 100644
index b34b094ab..000000000
--- a/heat_integrationtests/functional/test_encryption_vol_type.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from heat_integrationtests.functional import functional_base
-
-test_encryption_vol_type = {
- 'heat_template_version': '2015-04-30',
- 'description': 'Test template to create encryption volume type.',
- 'resources': {
- 'my_volume_type': {
- 'type': 'OS::Cinder::VolumeType',
- 'properties': {
- 'name': 'LUKS'
- }
- },
- 'my_encrypted_vol_type': {
- 'type': 'OS::Cinder::EncryptedVolumeType',
- 'properties': {
- 'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
- 'control_location': 'front-end',
- 'cipher': 'aes-xts-plain64',
- 'key_size': 512,
- 'volume_type': {'get_resource': 'my_volume_type'}
- }
- }
- }
-}
-
-
-class EncryptionVolTypeTest(functional_base.FunctionalTestsBase):
- def setUp(self):
- super(EncryptionVolTypeTest, self).setUp()
- if not self.conf.admin_username or not self.conf.admin_password:
- self.skipTest('No admin creds found, skipping')
- # cinder security policy usage of volume type is limited
- # to being used by administrators only.
- # Switch to admin
- self.setup_clients_for_admin()
-
- def check_stack(self, sid):
- vt = 'my_volume_type'
- e_vt = 'my_encrypted_vol_type'
-
- # check if only two resources are present.
- expected_resources = {vt: 'OS::Cinder::VolumeType',
- e_vt: 'OS::Cinder::EncryptedVolumeType'}
- self.assertEqual(expected_resources,
- self.list_resources(sid))
-
- e_vt_obj = self.client.resources.get(sid, e_vt)
- my_encrypted_vol_type_tmpl_prop = test_encryption_vol_type[
- 'resources']['my_encrypted_vol_type']['properties']
-
- # check if the phy rsrc specs was created in accordance with template.
- phy_rsrc_specs = self.volume_client.volume_encryption_types.get(
- e_vt_obj.physical_resource_id)
- self.assertEqual(my_encrypted_vol_type_tmpl_prop['key_size'],
- phy_rsrc_specs.key_size)
- self.assertEqual(my_encrypted_vol_type_tmpl_prop['provider'],
- phy_rsrc_specs.provider)
- self.assertEqual(my_encrypted_vol_type_tmpl_prop['cipher'],
- phy_rsrc_specs.cipher)
- self.assertEqual(my_encrypted_vol_type_tmpl_prop['control_location'],
- phy_rsrc_specs.control_location)
-
- def test_create_update(self):
- stack_identifier = self.stack_create(
- template=test_encryption_vol_type)
- self.check_stack(stack_identifier)
-
- # Change some properties and trigger update.
- my_encrypted_vol_type_tmpl_prop = test_encryption_vol_type[
- 'resources']['my_encrypted_vol_type']['properties']
- my_encrypted_vol_type_tmpl_prop['key_size'] = 256
- my_encrypted_vol_type_tmpl_prop['cipher'] = 'aes-cbc-essiv'
- self.update_stack(stack_identifier, test_encryption_vol_type)
- self.check_stack(stack_identifier)
diff --git a/heat_integrationtests/functional/test_event_sinks.py b/heat_integrationtests/functional/test_event_sinks.py
deleted file mode 100644
index 61f1bfabb..000000000
--- a/heat_integrationtests/functional/test_event_sinks.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from zaqarclient.queues.v2 import client as zaqarclient
-
-from heat_integrationtests.common import test
-from heat_integrationtests.functional import functional_base
-
-
-class ZaqarEventSinkTest(functional_base.FunctionalTestsBase):
- template = '''
-heat_template_version: "2013-05-23"
-resources:
- test_resource:
- type: OS::Heat::TestResource
- properties:
- value: ok
-'''
-
- def test_events(self):
- queue_id = str(uuid.uuid4())
- environment = {'event_sinks': [{'type': 'zaqar-queue',
- 'target': queue_id,
- 'ttl': 120}]}
- stack_identifier = self.stack_create(
- template=self.template,
- environment=environment)
- stack_name, stack_id = stack_identifier.split('/')
- conf = {
- 'auth_opts': {
- 'backend': 'keystone',
- 'options': {
- 'os_username': self.conf.username,
- 'os_password': self.conf.password,
- 'os_project_name': self.conf.project_name,
- 'os_auth_url': self.conf.auth_url,
- 'os_user_domain_id': self.conf.user_domain_id,
- 'os_project_domain_id': self.conf.project_domain_id,
- 'os_user_domain_name': self.conf.user_domain_name,
- 'os_project_domain_name': self.conf.project_domain_name
- }
- }
- }
-
- zaqar = zaqarclient.Client(conf=conf)
- queue = zaqar.queue(queue_id)
-
- def validate_messages():
- messages = list(queue.messages())
- if len(messages) < 4:
- return False
-
- types = [m.body['type'] for m in messages]
- self.assertEqual(['os.heat.event'] * 4, types)
- resources = set([m.body['payload'][
- 'resource_name'] for m in messages])
- self.assertEqual(set([stack_name, 'test_resource']), resources)
- stack_ids = [m.body['payload']['stack_id'] for m in messages]
- self.assertEqual([stack_id] * 4, stack_ids)
- statuses = [m.body['payload']['resource_status'] for m in messages]
- statuses.sort()
- self.assertEqual(['COMPLETE', 'COMPLETE',
- 'IN_PROGRESS', 'IN_PROGRESS'], statuses)
- actions = [m.body['payload']['resource_action'] for m in messages]
- self.assertEqual(['CREATE'] * 4, actions)
- return True
-
- self.assertTrue(test.call_until_true(20, 0, validate_messages))
diff --git a/heat_integrationtests/functional/test_external_ref.py b/heat_integrationtests/functional/test_external_ref.py
deleted file mode 100644
index 2601ca728..000000000
--- a/heat_integrationtests/functional/test_external_ref.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class ExternalReferencesTest(functional_base.FunctionalTestsBase):
-
- TEMPLATE = '''
-heat_template_version: 2016-10-14
-resources:
- test1:
- type: OS::Heat::TestResource
-'''
- TEMPLATE_WITH_EX_REF = '''
-heat_template_version: 2016-10-14
-resources:
- test1:
- type: OS::Heat::TestResource
- external_id: foobar
-outputs:
- str:
- value: {get_resource: test1}
-'''
-
- def test_create_with_external_ref(self):
- stack_name = self._stack_rand_name()
- stack_identifier = self.stack_create(
- stack_name=stack_name,
- template=self.TEMPLATE_WITH_EX_REF,
- files={},
- disable_rollback=True,
- parameters={},
- environment={}
- )
-
- stack = self.client.stacks.get(stack_identifier)
-
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- expected_resources = {'test1': 'OS::Heat::TestResource'}
- self.assertEqual(expected_resources,
- self.list_resources(stack_identifier))
- stack = self.client.stacks.get(stack_identifier)
- self.assertEqual(
- [{'description': 'No description given',
- 'output_key': 'str',
- 'output_value': 'foobar'}], stack.outputs)
-
- def test_update_with_external_ref(self):
- stack_name = self._stack_rand_name()
- stack_identifier = self.stack_create(
- stack_name=stack_name,
- template=self.TEMPLATE,
- files={},
- disable_rollback=True,
- parameters={},
- environment={}
- )
- stack = self.client.stacks.get(stack_identifier)
-
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- expected_resources = {'test1': 'OS::Heat::TestResource'}
- self.assertEqual(expected_resources,
- self.list_resources(stack_identifier))
- stack = self.client.stacks.get(stack_identifier)
- self.assertEqual([], stack.outputs)
-
- stack_name = stack_identifier.split('/')[0]
- kwargs = {'stack_id': stack_identifier, 'stack_name': stack_name,
- 'template': self.TEMPLATE_WITH_EX_REF, 'files': {},
- 'disable_rollback': True, 'parameters': {}, 'environment': {}
- }
- self.client.stacks.update(**kwargs)
- self._wait_for_stack_status(stack_identifier, 'UPDATE_FAILED')
diff --git a/heat_integrationtests/functional/test_heat_autoscaling.py b/heat_integrationtests/functional/test_heat_autoscaling.py
index 474e1c3af..cd1c9c73c 100644
--- a/heat_integrationtests/functional/test_heat_autoscaling.py
+++ b/heat_integrationtests/functional/test_heat_autoscaling.py
@@ -120,7 +120,7 @@ outputs:
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
- 'scale_up_policy'))
+ 'random_group'))
def test_asg_scale_down_min_size(self):
stack_id = self.stack_create(template=self.template,
@@ -142,7 +142,7 @@ outputs:
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
- 'scale_down_policy'))
+ 'random_group'))
def test_asg_cooldown(self):
cooldown_tmpl = self.template.replace('cooldown: 0',
@@ -165,7 +165,7 @@ outputs:
self.check_autoscale_complete,
asg.physical_resource_id,
expected_resources, stack_id,
- 'scale_up_policy'))
+ 'random_group'))
def test_path_attrs(self):
stack_id = self.stack_create(template=self.template)
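These three hunks mirror the check_autoscale_complete() signature change in common/test.py above: the scaling_in_progress flag is read from the scaling group's metadata rather than the policy's, so the tests now pass the group's resource name ('random_group', presumably the template's OS::Heat::AutoScalingGroup) instead of a policy name.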
diff --git a/heat_integrationtests/functional/test_hooks.py b/heat_integrationtests/functional/test_hooks.py
deleted file mode 100644
index bafb0ef19..000000000
--- a/heat_integrationtests/functional/test_hooks.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import yaml
-
-from heat_integrationtests.functional import functional_base
-
-
-class HooksTest(functional_base.FunctionalTestsBase):
-
- def setUp(self):
- super(HooksTest, self).setUp()
- self.template = {'heat_template_version': '2014-10-16',
- 'resources': {
- 'foo_step1': {'type': 'OS::Heat::RandomString'},
- 'foo_step2': {'type': 'OS::Heat::RandomString',
- 'depends_on': 'foo_step1'},
- 'foo_step3': {'type': 'OS::Heat::RandomString',
- 'depends_on': 'foo_step2'}}}
-
- def test_hook_pre_create(self):
- env = {'resource_registry':
- {'resources':
- {'foo_step2':
- {'hooks': 'pre-create'}}}}
- # Note we don't wait for CREATE_COMPLETE, because we need to
- # signal to clear the hook before create will complete
- stack_identifier = self.stack_create(
- template=self.template,
- environment=env,
- expected_status='CREATE_IN_PROGRESS')
- self._wait_for_resource_status(
- stack_identifier, 'foo_step1', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'INIT_COMPLETE')
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='CREATE paused until Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
- self.client.resources.signal(stack_identifier, 'foo_step2',
- data={'unset_hook': 'pre-create'})
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
-
- def test_hook_pre_update_nochange(self):
- env = {'resource_registry':
- {'resources':
- {'foo_step2':
- {'hooks': 'pre-update'}}}}
- stack_identifier = self.stack_create(
- template=self.template,
- environment=env)
- res_before = self.client.resources.get(stack_identifier, 'foo_step2')
- # Note we don't wait for UPDATE_COMPLETE, because we need to
- # signal to clear the hook before update will complete
- self.update_stack(
- stack_identifier,
- template=self.template,
- environment=env,
- expected_status='UPDATE_IN_PROGRESS')
-
- # Note when a hook is specified, the resource status doesn't change
- # when we hit the hook, so we look for the event, then assert the
- # state is unchanged.
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='UPDATE paused until Hook pre-update is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self.client.resources.signal(stack_identifier, 'foo_step2',
- data={'unset_hook': 'pre-update'})
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-update is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
- self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
- res_after = self.client.resources.get(stack_identifier, 'foo_step2')
- self.assertEqual(res_before.physical_resource_id,
- res_after.physical_resource_id)
-
- def test_hook_pre_update_replace(self):
- env = {'resource_registry':
- {'resources':
- {'foo_step2':
- {'hooks': 'pre-update'}}}}
- stack_identifier = self.stack_create(
- template=self.template,
- environment=env)
- res_before = self.client.resources.get(stack_identifier, 'foo_step2')
- # Note we don't wait for UPDATE_COMPLETE, because we need to
- # signal to clear the hook before update will complete
- self.template['resources']['foo_step2']['properties'] = {'length': 10}
- self.update_stack(
- stack_identifier,
- template=self.template,
- environment=env,
- expected_status='UPDATE_IN_PROGRESS')
-
- # Note when a hook is specified, the resource status doesn't change
- # when we hit the hook, so we look for the event, then assert the
- # state is unchanged.
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='UPDATE paused until Hook pre-update is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self.client.resources.signal(stack_identifier, 'foo_step2',
- data={'unset_hook': 'pre-update'})
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-update is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'CREATE_COMPLETE')
- self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
- res_after = self.client.resources.get(stack_identifier, 'foo_step2')
- self.assertNotEqual(res_before.physical_resource_id,
- res_after.physical_resource_id)
-
- def test_hook_pre_update_in_place(self):
- env = {'resource_registry':
- {'resources':
- {'rg':
- {'hooks': 'pre-update'}}}}
- template = {'heat_template_version': '2014-10-16',
- 'resources': {
- 'rg': {
- 'type': 'OS::Heat::ResourceGroup',
- 'properties': {
- 'count': 1,
- 'resource_def': {
- 'type': 'OS::Heat::RandomString'}}}}}
- # Note we don't wait for CREATE_COMPLETE, because we need to
- # signal to clear the hook before create will complete
- stack_identifier = self.stack_create(
- template=template,
- environment=env)
- res_before = self.client.resources.get(stack_identifier, 'rg')
- template['resources']['rg']['properties']['count'] = 2
- self.update_stack(
- stack_identifier,
- template=template,
- environment=env,
- expected_status='UPDATE_IN_PROGRESS')
-
- # Note when a hook is specified, the resource status doesn't change
- # when we hit the hook, so we look for the event, then assert the
- # state is unchanged.
- self._wait_for_resource_status(
- stack_identifier, 'rg', 'CREATE_COMPLETE')
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='UPDATE paused until Hook pre-update is cleared',
- rsrc_name='rg')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self.client.resources.signal(stack_identifier, 'rg',
- data={'unset_hook': 'pre-update'})
-
- ev = self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-update is cleared',
- rsrc_name='rg')
- self.assertEqual('CREATE_COMPLETE', ev[0].resource_status)
- self._wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
- res_after = self.client.resources.get(stack_identifier, 'rg')
- self.assertEqual(res_before.physical_resource_id,
- res_after.physical_resource_id)
-
- def test_hook_pre_create_nested(self):
- files = {'nested.yaml': yaml.safe_dump(self.template)}
- env = {'resource_registry':
- {'resources':
- {'nested':
- {'foo_step2':
- {'hooks': 'pre-create'}}}}}
- template = {'heat_template_version': '2014-10-16',
- 'resources': {
- 'nested': {'type': 'nested.yaml'}}}
- # Note we don't wait for CREATE_COMPLETE, because we need to
- # signal to clear the hook before create will complete
- stack_identifier = self.stack_create(
- template=template,
- environment=env,
- files=files,
- expected_status='CREATE_IN_PROGRESS')
- self._wait_for_resource_status(stack_identifier, 'nested',
- 'CREATE_IN_PROGRESS')
- nested_identifier = self.assert_resource_is_a_stack(
- stack_identifier, 'nested', wait=True)
- self._wait_for_resource_status(
- nested_identifier, 'foo_step1', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
- nested_identifier, 'foo_step2', 'INIT_COMPLETE')
- ev = self.wait_for_event_with_reason(
- nested_identifier,
- reason='CREATE paused until Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
- self.client.resources.signal(nested_identifier, 'foo_step2',
- data={'unset_hook': 'pre-create'})
- ev = self.wait_for_event_with_reason(
- nested_identifier,
- reason='Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self.assertEqual('INIT_COMPLETE', ev[0].resource_status)
- self._wait_for_resource_status(
- nested_identifier, 'foo_step2', 'CREATE_COMPLETE')
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
-
- def test_hook_pre_create_wildcard(self):
- env = {'resource_registry':
- {'resources':
- {'foo_*':
- {'hooks': 'pre-create'}}}}
- # Note we don't wait for CREATE_COMPLETE, because we need to
- # signal to clear the hook before create will complete
- stack_identifier = self.stack_create(
- template=self.template,
- environment=env,
- expected_status='CREATE_IN_PROGRESS')
- self._wait_for_resource_status(
- stack_identifier, 'foo_step1', 'INIT_COMPLETE')
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='CREATE paused until Hook pre-create is cleared',
- rsrc_name='foo_step1')
- self.client.resources.signal(stack_identifier, 'foo_step1',
- data={'unset_hook': 'pre-create'})
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-create is cleared',
- rsrc_name='foo_step1')
- self._wait_for_resource_status(
- stack_identifier, 'foo_step2', 'INIT_COMPLETE')
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='CREATE paused until Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self.client.resources.signal(stack_identifier, 'foo_step2',
- data={'unset_hook': 'pre-create'})
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-create is cleared',
- rsrc_name='foo_step2')
- self._wait_for_resource_status(
- stack_identifier, 'foo_step3', 'INIT_COMPLETE')
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='CREATE paused until Hook pre-create is cleared',
- rsrc_name='foo_step3')
- self.client.resources.signal(stack_identifier, 'foo_step3',
- data={'unset_hook': 'pre-create'})
- self.wait_for_event_with_reason(
- stack_identifier,
- reason='Hook pre-create is cleared',
- rsrc_name='foo_step3')
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
diff --git a/heat_integrationtests/functional/test_lbaasv2.py b/heat_integrationtests/functional/test_lbaasv2.py
deleted file mode 100644
index e7f56ef0a..000000000
--- a/heat_integrationtests/functional/test_lbaasv2.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from heat_integrationtests.functional import functional_base
-
-
-class LoadBalancerv2Test(functional_base.FunctionalTestsBase):
-
- create_template = '''
-heat_template_version: 2016-04-08
-parameters:
- subnet:
- type: string
-resources:
- loadbalancer:
- type: OS::Neutron::LBaaS::LoadBalancer
- properties:
- description: aLoadBalancer
- vip_subnet: { get_param: subnet }
- listener:
- type: OS::Neutron::LBaaS::Listener
- properties:
- description: aListener
- loadbalancer: { get_resource: loadbalancer }
- protocol: HTTP
- protocol_port: 80
- connection_limit: 5555
- pool:
- type: OS::Neutron::LBaaS::Pool
- properties:
- description: aPool
- lb_algorithm: ROUND_ROBIN
- protocol: HTTP
- listener: { get_resource: listener }
- poolmember:
- type: OS::Neutron::LBaaS::PoolMember
- properties:
- address: 1.1.1.1
- pool: { get_resource: pool }
- protocol_port: 1111
- subnet: { get_param: subnet }
- weight: 255
- # pm2
- healthmonitor:
- type: OS::Neutron::LBaaS::HealthMonitor
- properties:
- delay: 3
- type: HTTP
- timeout: 3
- max_retries: 3
- pool: { get_resource: pool }
-outputs:
- loadbalancer:
- value: { get_attr: [ loadbalancer, show ] }
- pool:
- value: { get_attr: [ pool, show ] }
- poolmember:
- value: { get_attr: [ poolmember, show ] }
- listener:
- value: { get_attr: [ listener, show ] }
- healthmonitor:
- value: { get_attr: [ healthmonitor, show ] }
-'''
-
- add_member = '''
- poolmember2:
- type: OS::Neutron::LBaaS::PoolMember
- properties:
- address: 2.2.2.2
- pool: { get_resource: pool }
- protocol_port: 2222
- subnet: { get_param: subnet }
- weight: 222
-'''
-
- def setUp(self):
- super(LoadBalancerv2Test, self).setUp()
- if not self.is_network_extension_supported('lbaasv2'):
- self.skipTest('LBaasv2 extension not available, skipping')
-
- def test_create_update_loadbalancer(self):
- parameters = {
- 'subnet': self.conf.fixed_subnet_name,
- }
- stack_identifier = self.stack_create(template=self.create_template,
- parameters=parameters)
- stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, 'loadbalancer')
- self.assertEqual('ONLINE', output['operating_status'])
-
- template = self.create_template.replace('ROUND_ROBIN', 'SOURCE_IP')
- template = template.replace('3', '6')
- template = template.replace('255', '256')
- template = template.replace('5555', '7777')
- template = template.replace('aLoadBalancer', 'updatedLoadBalancer')
- template = template.replace('aPool', 'updatedPool')
- template = template.replace('aListener', 'updatedListener')
- self.update_stack(stack_identifier, template=template,
- parameters=parameters)
- stack = self.client.stacks.get(stack_identifier)
-
- output = self._stack_output(stack, 'loadbalancer')
- self.assertEqual('ONLINE', output['operating_status'])
- self.assertEqual('updatedLoadBalancer', output['description'])
- output = self._stack_output(stack, 'pool')
- self.assertEqual('SOURCE_IP', output['lb_algorithm'])
- self.assertEqual('updatedPool', output['description'])
- output = self._stack_output(stack, 'poolmember')
- self.assertEqual(256, output['weight'])
- output = self._stack_output(stack, 'healthmonitor')
- self.assertEqual(6, output['delay'])
- self.assertEqual(6, output['timeout'])
- self.assertEqual(6, output['max_retries'])
- output = self._stack_output(stack, 'listener')
- self.assertEqual(7777, output['connection_limit'])
- self.assertEqual('updatedListener', output['description'])
-
- def test_add_delete_poolmember(self):
- parameters = {
- 'subnet': self.conf.fixed_subnet_name,
- }
- stack_identifier = self.stack_create(template=self.create_template,
- parameters=parameters)
- stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, 'loadbalancer')
- self.assertEqual('ONLINE', output['operating_status'])
- output = self._stack_output(stack, 'pool')
- self.assertEqual(1, len(output['members']))
- # add pool member
- template = self.create_template.replace('# pm2', self.add_member)
- self.update_stack(stack_identifier, template=template,
- parameters=parameters)
- stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, 'loadbalancer')
- self.assertEqual('ONLINE', output['operating_status'])
- output = self._stack_output(stack, 'pool')
- self.assertEqual(2, len(output['members']))
- # delete pool member
- self.update_stack(stack_identifier, template=self.create_template,
- parameters=parameters)
- stack = self.client.stacks.get(stack_identifier)
- output = self._stack_output(stack, 'loadbalancer')
- self.assertEqual('ONLINE', output['operating_status'])
- output = self._stack_output(stack, 'pool')
- self.assertEqual(1, len(output['members']))
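A minimal sketch (not part of the diff) of the update pattern the deleted LBaaS test relies on, assuming `client` is an authenticated heatclient handle and `stack_id`, `create_template` are placeholders: mutate the template text, push an update, then re-read the outputs.

    # swap the pool algorithm in the raw template text and update in place
    tmpl = create_template.replace('ROUND_ROBIN', 'SOURCE_IP')
    client.stacks.update(stack_id, template=tmpl,
                         parameters={'subnet': 'private-subnet'})
    updated = client.stacks.get(stack_id)  # outputs now reflect SOURCE_IP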
diff --git a/heat_integrationtests/functional/test_nova_server_networks.py b/heat_integrationtests/functional/test_nova_server_networks.py
deleted file mode 100644
index 9b6b5e6d8..000000000
--- a/heat_integrationtests/functional/test_nova_server_networks.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-server_with_sub_fixed_ip_template = '''
-heat_template_version: 2016-04-08
-description: Test template to test nova server with subnet and fixed_ip.
-parameters:
- flavor:
- type: string
- image:
- type: string
-resources:
- net:
- type: OS::Neutron::Net
- properties:
- name: my_net
- subnet:
- type: OS::Neutron::Subnet
- properties:
- network: {get_resource: net}
- cidr: 11.11.11.0/24
- security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- name: the_sg
- description: Ping and SSH
- rules:
- - protocol: icmp
- - protocol: tcp
- port_range_min: 22
- port_range_max: 22
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- networks:
- - subnet: {get_resource: subnet}
- fixed_ip: 11.11.11.11
- security_groups:
- - {get_resource: security_group}
-outputs:
- networks:
- value: {get_attr: [server, networks]}
-'''
-
-server_with_port_template = '''
-heat_template_version: 2016-04-08
-description: Test template to test nova server with port.
-parameters:
- flavor:
- type: string
- image:
- type: string
-resources:
- net:
- type: OS::Neutron::Net
- properties:
- name: server_with_port_net
- subnet:
- type: OS::Neutron::Subnet
- properties:
- network: {get_resource: net}
- cidr: 11.11.11.0/24
- port:
- type: OS::Neutron::Port
- properties:
- network: {get_resource: net}
- fixed_ips:
- - subnet: {get_resource: subnet}
- ip_address: 11.11.11.11
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- networks:
- - port: {get_resource: port}
-'''
-
-
-class CreateServerTest(functional_base.FunctionalTestsBase):
-
- def get_outputs(self, stack_identifier, output_key):
- stack = self.client.stacks.get(stack_identifier)
- return self._stack_output(stack, output_key)
-
- def test_create_server_with_subnet_fixed_ip_sec_group(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref}
- stack_identifier = self.stack_create(
- template=server_with_sub_fixed_ip_template,
- stack_name='server_with_sub_ip',
- parameters=parms)
-
- networks = self.get_outputs(stack_identifier, 'networks')
- self.assertEqual(['11.11.11.11'], networks['my_net'])
-
- server_resource = self.client.resources.get(
- stack_identifier, 'server')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
- self.assertEqual([{"name": "the_sg"}], server.security_groups)
-
- def test_create_update_server_with_subnet(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref}
- template = server_with_sub_fixed_ip_template.replace(
- 'fixed_ip: 11.11.11.11',
- 'fixed_ip: 11.11.11.22').replace(
- 'name: my_net', 'name: your_net')
- stack_identifier = self.stack_create(
- template=template,
- stack_name='create_server_with_sub_ip',
- parameters=parms)
- networks = self.get_outputs(stack_identifier, 'networks')
- self.assertEqual(['11.11.11.22'], networks['your_net'])
-
- # update the server with only the subnet; since we don't pass
- # both port_id and net_id when attaching the interface, the update succeeds
- template_only_subnet = template.replace(
- 'fixed_ip: 11.11.11.22', '')
- self.update_stack(stack_identifier,
- template_only_subnet,
- parameters=parms)
- new_networks = self.get_outputs(stack_identifier, 'networks')
- self.assertNotEqual(['11.11.11.22'], new_networks['your_net'])
-
- def test_create_server_with_port(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref}
- # We just want to make sure we can create the server, no need to assert
- # anything
- self.stack_create(
- template=server_with_port_template,
- stack_name='server_with_port',
- parameters=parms)
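A hedged sketch of how the deleted test cross-checks Heat against Nova, assuming `client` (heatclient) and `compute` (novaclient) are pre-authenticated placeholders:

    # resolve the Heat resource to the Nova server behind it
    res = client.resources.get(stack_id, 'server')
    server = compute.servers.get(res.physical_resource_id)
    # security groups come back as a list of dicts, e.g. [{'name': 'the_sg'}]
    print(server.security_groups)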
diff --git a/heat_integrationtests/functional/test_os_wait_condition.py b/heat_integrationtests/functional/test_os_wait_condition.py
deleted file mode 100644
index 5c1783af7..000000000
--- a/heat_integrationtests/functional/test_os_wait_condition.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class OSWaitCondition(functional_base.FunctionalTestsBase):
-
- template = '''
-heat_template_version: 2013-05-23
-parameters:
- flavor:
- type: string
- image:
- type: string
- network:
- type: string
- timeout:
- type: number
- default: 60
-resources:
- instance1:
- type: OS::Nova::Server
- properties:
- flavor: {get_param: flavor}
- image: {get_param: image}
- networks:
- - network: {get_param: network}
- user_data_format: RAW
- user_data:
- str_replace:
- template: '#!/bin/sh
-
- wc_notify --data-binary ''{"status": "SUCCESS"}''
-
- # signals with reason
-
- wc_notify --data-binary ''{"status": "SUCCESS", "reason":
- "signal2"}''
-
- # signals with data
-
- wc_notify --data-binary ''{"status": "SUCCESS", "reason":
- "signal3", "data": "data3"}''
-
- wc_notify --data-binary ''{"status": "SUCCESS", "reason":
- "signal4", "data": "data4"}''
-
- # check that signals sharing the same id are counted once
-
- wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''
-
- wc_notify --data-binary ''{"status": "SUCCESS", "id": "5"}''
-
- # loop for 20 signals without reason or data
-
- for i in `seq 1 20`; do wc_notify --data-binary ''{"status":
- "SUCCESS"}'' & done
-
- wait
- '
- params:
- wc_notify:
- get_attr: [wait_handle, curl_cli]
-
- wait_condition:
- type: OS::Heat::WaitCondition
- depends_on: instance1
- properties:
- count: 25
- handle: {get_resource: wait_handle}
- timeout: {get_param: timeout}
-
- wait_handle:
- type: OS::Heat::WaitConditionHandle
-
-outputs:
- curl_cli:
- value:
- get_attr: [wait_handle, curl_cli]
- wc_data:
- value:
- get_attr: [wait_condition, data]
-'''
-
- def setUp(self):
- super(OSWaitCondition, self).setUp()
- if not self.conf.minimal_image_ref:
- raise self.skipException("No minimal image configured to test")
- if not self.conf.minimal_instance_type:
- raise self.skipException("No minimal flavor configured to test")
-
- def test_create_stack_with_multi_signal_waitcondition(self):
- params = {'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref,
- 'network': self.conf.fixed_network_name,
- 'timeout': 120}
- self.stack_create(template=self.template, parameters=params)
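For reference, the wait-condition count of 25 in the template above is the sum of the distinct signals the script sends: four plain SUCCESS signals (one bare, one with a reason, two with data), one for the pair that reuses id "5" (signals sharing an id are counted once), and twenty from the shell loop: 4 + 1 + 20 = 25.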
diff --git a/heat_integrationtests/functional/test_preview.py b/heat_integrationtests/functional/test_preview.py
deleted file mode 100644
index 54f8a79f7..000000000
--- a/heat_integrationtests/functional/test_preview.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-from heatclient import exc
-import six
-
-
-class StackPreviewTest(functional_base.FunctionalTestsBase):
- template = '''
-heat_template_version: 2015-04-30
-parameters:
- incomming:
- type: string
-resources:
- one:
- type: OS::Heat::TestResource
- properties:
- value: fred
- two:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: incomming}
- depends_on: one
-outputs:
- main_out:
- value: {get_attr: [two, output]}
- '''
- env = '''
-parameters:
- incomming: abc
- '''
-
- def setUp(self):
- super(StackPreviewTest, self).setUp()
- self.client = self.orchestration_client
- self.project_id = self.identity_client.project_id
-
- def _assert_resource(self, res, stack_name):
- self.assertEqual(stack_name, res['stack_name'])
- self.assertEqual('INIT', res['resource_action'])
- self.assertEqual('COMPLETE', res['resource_status'])
- for field in ('resource_status_reason', 'physical_resource_id',
- 'description'):
- self.assertIn(field, res)
- self.assertEqual('', res[field])
- # 'creation_time' and 'updated_time' are None during preview
- for field in ('creation_time', 'updated_time'):
- self.assertIn(field, res)
- self.assertIsNone(res[field])
- self.assertIn('output', res['attributes'])
-
- # resource_identity
- self.assertEqual(stack_name,
- res['resource_identity']['stack_name'])
- self.assertEqual('None', res['resource_identity']['stack_id'])
- self.assertEqual(self.project_id,
- res['resource_identity']['tenant'])
- self.assertEqual('/resources/%s' % res['resource_name'],
- res['resource_identity']['path'])
- # stack_identity
- self.assertEqual(stack_name,
- res['stack_identity']['stack_name'])
- self.assertEqual('None', res['stack_identity']['stack_id'])
- self.assertEqual(self.project_id,
- res['stack_identity']['tenant'])
- self.assertEqual('', res['stack_identity']['path'])
-
- def _assert_results(self, result, stack_name):
- # stack-level fields.
- self.assertEqual(stack_name, result['stack_name'])
- self.assertTrue(result['disable_rollback'])
- self.assertEqual('None', result['id'])
- self.assertIsNone(result['parent'])
- self.assertEqual('No description', result['template_description'])
-
- # parameters
- self.assertEqual('None', result['parameters']['OS::stack_id'])
- self.assertEqual(stack_name, result['parameters']['OS::stack_name'])
- self.assertEqual('abc', result['parameters']['incomming'])
-
- def test_basic_pass(self):
- stack_name = self._stack_rand_name()
- result = self.client.stacks.preview(
- template=self.template,
- stack_name=stack_name,
- disable_rollback=True,
- environment=self.env).to_dict()
-
- self._assert_results(result, stack_name)
- for res in result['resources']:
- self._assert_resource(res, stack_name)
- self.assertEqual('OS::Heat::TestResource',
- res['resource_type'])
-
- # common properties
- self.assertFalse(res['properties']['fail'])
- self.assertEqual(0, res['properties']['wait_secs'])
- self.assertFalse(res['properties']['update_replace'])
-
- if res['resource_name'] == 'one':
- self.assertEqual('fred', res['properties']['value'])
- self.assertEqual(['two'], res['required_by'])
- if res['resource_name'] == 'two':
- self.assertEqual('abc', res['properties']['value'])
- self.assertEqual([], res['required_by'])
-
- def test_basic_fail(self):
- stack_name = self._stack_rand_name()
-
- # break the template so it fails validation.
- wont_work = self.template.replace('get_param: incomming',
- 'get_param: missing')
- excp = self.assertRaises(exc.HTTPBadRequest,
- self.client.stacks.preview,
- template=wont_work,
- stack_name=stack_name,
- disable_rollback=True,
- environment=self.env)
-
- self.assertIn('Property error: : resources.two.properties.value: '
- ': The Parameter (missing) was not provided.',
- six.text_type(excp))
-
- def test_nested_pass(self):
- """Nested stacks need to recurse down the stacks."""
- main_template = '''
-heat_template_version: 2015-04-30
-parameters:
- incomming:
- type: string
-resources:
- main:
- type: nested.yaml
- properties:
- value: {get_param: incomming}
-outputs:
- main_out:
- value: {get_attr: [main, output]}
- '''
- nested_template = '''
-heat_template_version: 2015-04-30
-parameters:
- value:
- type: string
-resources:
- nested:
- type: OS::Heat::TestResource
- properties:
- value: {get_param: value}
-outputs:
- output:
- value: {get_attr: [nested, output]}
-'''
- stack_name = self._stack_rand_name()
- result = self.client.stacks.preview(
- disable_rollback=True,
- stack_name=stack_name,
- template=main_template,
- files={'nested.yaml': nested_template},
- environment=self.env).to_dict()
-
- self._assert_results(result, stack_name)
-
- # nested resources return a list of their resources.
- res = result['resources'][0][0]
- nested_stack_name = '%s-%s' % (stack_name,
- res['parent_resource'])
-
- self._assert_resource(res, nested_stack_name)
- self.assertEqual('OS::Heat::TestResource',
- res['resource_type'])
-
- self.assertFalse(res['properties']['fail'])
- self.assertEqual(0, res['properties']['wait_secs'])
- self.assertFalse(res['properties']['update_replace'])
-
- self.assertEqual('abc', res['properties']['value'])
- self.assertEqual([], res['required_by'])
-
- def test_res_group_with_nested_template(self):
- main_template = '''
-heat_template_version: 2015-04-30
-resources:
- fixed_network:
- type: "OS::Neutron::Net"
- rg:
- type: "OS::Heat::ResourceGroup"
- properties:
- count: 1
- resource_def:
- type: nested.yaml
- properties:
- fixed_network_id: {get_resource: fixed_network}
- '''
- nested_template = '''
-heat_template_version: 2015-04-30
-
-parameters:
- fixed_network_id:
- type: string
-resources:
- port:
- type: "OS::Neutron::Port"
- properties:
- network_id:
- get_param: fixed_network_id
-
-'''
- stack_name = self._stack_rand_name()
- result = self.client.stacks.preview(
- disable_rollback=True,
- stack_name=stack_name,
- template=main_template,
- files={'nested.yaml': nested_template}).to_dict()
-
- resource_names = []
-
- def get_resource_names(resources):
- for item in resources:
- if isinstance(item, dict):
- resource_names.append(item['resource_name'])
- else:
- get_resource_names(item)
- get_resource_names(result['resources'])
- # ensure that both the fixed network and the port are present
- self.assertIn('fixed_network', resource_names)
- self.assertIn('port', resource_names)
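A minimal sketch of the preview call the deleted tests exercise, assuming a placeholder heatclient handle `client` and template string `tmpl`; preview returns the would-be stack without creating anything, which is why identifiers come back as the string 'None':

    result = client.stacks.preview(stack_name='demo', template=tmpl,
                                   disable_rollback=True).to_dict()
    assert result['id'] == 'None'          # nothing was created
    for res in result['resources']:
        print(res['resource_name'], res['required_by'])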
diff --git a/heat_integrationtests/functional/test_reload_on_sighup.py b/heat_integrationtests/functional/test_reload_on_sighup.py
deleted file mode 100644
index f87669c51..000000000
--- a/heat_integrationtests/functional/test_reload_on_sighup.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import subprocess
-import time
-
-import eventlet
-
-from oslo_concurrency import processutils
-from six.moves import configparser
-
-from heat_integrationtests.functional import functional_base
-
-
-class ReloadOnSighupTest(functional_base.FunctionalTestsBase):
-
- def setUp(self):
- self.config_file = "/etc/heat/heat.conf"
- super(ReloadOnSighupTest, self).setUp()
-
- def _is_mod_wsgi_daemon(self, service):
- process = ''.join(['wsgi:',
- service[:9]]).replace('_', '-').encode('utf-8')
- s = subprocess.Popen(["ps", "ax"], stdout=subprocess.PIPE)
- for x in s.stdout:
- if re.search(process, x):
- return True
-
- def _set_config_value(self, service, key, value):
- config = configparser.ConfigParser()
-
- # NOTE(prazumovsky): With several workers there is a race: one
- # thread opens self.config_file for writing (truncating the file
- # on open) while another thread reads it to set a config option,
- # i.e. writes to a file that the first thread has just erased, so
- # configparser raises NoSectionError. We therefore retry until
- # the first thread has finished writing config_file before
- # setting the option.
- retries_count = self.conf.sighup_config_edit_retries
- while True:
- config.read(self.config_file)
- try:
- config.set(service, key, str(value))
- except configparser.NoSectionError:
- if retries_count <= 0:
- raise
- retries_count -= 1
- eventlet.sleep(1)
- else:
- break
-
- with open(self.config_file, 'w') as f:
- config.write(f)
-
- def _get_config_value(self, service, key):
- config = configparser.ConfigParser()
- config.read(self.config_file)
- val = config.get(service, key)
- return val
-
- def _get_heat_api_pids(self, service):
- # get the pids of all heat-api processes
- if service == "heat_api":
- process = "heat-api|grep -Ev 'grep|cloudwatch|cfn'"
- else:
- process = "%s|grep -Ev 'grep'" % service.replace('_', '-')
- cmd = "ps -ef|grep %s|awk '{print $2}'" % process
- out, err = processutils.execute(cmd, shell=True)
- self.assertIsNotNone(out, "heat-api service not running. %s" % err)
- pids = filter(None, out.split('\n'))
-
- # get the parent pids of all heat-api processes
- cmd = "ps -ef|grep %s|awk '{print $3}'" % process
- out, _ = processutils.execute(cmd, shell=True)
- parent_pids = filter(None, out.split('\n'))
-
- heat_api_parent = list(set(pids) & set(parent_pids))[0]
- heat_api_children = list(set(pids) - set(parent_pids))
-
- return heat_api_parent, heat_api_children
-
- def _change_config(self, service, old_workers, new_workers):
- pre_reload_parent, pre_reload_children = self._get_heat_api_pids(
- service)
- self.assertEqual(old_workers, len(pre_reload_children))
-
- # change the config values
- self._set_config_value(service, 'workers', new_workers)
- cmd = "kill -HUP %s" % pre_reload_parent
- processutils.execute(cmd, shell=True)
-
- # wait till heat-api reloads
- start_time = time.time()
- while time.time() - start_time < self.conf.sighup_timeout:
- post_reload_parent, post_reload_children = self._get_heat_api_pids(
- service)
- intersect = set(post_reload_children) & set(pre_reload_children)
- if (new_workers == len(post_reload_children)
- and pre_reload_parent == post_reload_parent
- and intersect == set()):
- break
- eventlet.sleep(1)
- self.assertEqual(pre_reload_parent, post_reload_parent)
- self.assertEqual(new_workers, len(post_reload_children))
- # test if all child processes are newly created
- self.assertEqual(set(post_reload_children) & set(pre_reload_children),
- set())
-
- def _reload(self, service):
- old_workers = int(self._get_config_value(service, 'workers'))
- new_workers = old_workers + 1
- self.addCleanup(self._set_config_value, service, 'workers',
- old_workers)
-
- self._change_config(service, old_workers, new_workers)
- # revert all the changes made
- self._change_config(service, new_workers, old_workers)
-
- def _reload_on_sighup(self, service):
- if not self._is_mod_wsgi_daemon(service):
- self._reload(service)
- else:
- self.skipTest('Skipping Test, Service running under httpd.')
-
- def test_api_reload_on_sighup(self):
- self._reload_on_sighup('heat_api')
-
- def test_api_cfn_reload_on_sighup(self):
- self._reload_on_sighup('heat_api_cfn')
-
- def test_api_cloudwatch_on_sighup(self):
- self._reload_on_sighup('heat_api_cloudwatch')
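The reload test above reduces to: bump `workers` in heat.conf, send SIGHUP to the parent process, then wait for a fresh set of children. A hedged stdlib-only sketch, with `parent_pid` as a placeholder:

    import os
    import signal

    from six.moves import configparser

    config = configparser.ConfigParser()
    config.read('/etc/heat/heat.conf')
    config.set('heat_api', 'workers', '3')
    with open('/etc/heat/heat.conf', 'w') as f:
        config.write(f)
    os.kill(parent_pid, signal.SIGHUP)  # parent keeps its pid, respawns workers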
diff --git a/heat_integrationtests/functional/test_remote_stack.py b/heat_integrationtests/functional/test_remote_stack.py
deleted file mode 100644
index b82958c85..000000000
--- a/heat_integrationtests/functional/test_remote_stack.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from heatclient import exc
-import six
-
-from heat_integrationtests.functional import functional_base
-
-
-class RemoteStackTest(functional_base.FunctionalTestsBase):
- template = '''
-heat_template_version: 2013-05-23
-resources:
- my_stack:
- type: OS::Heat::Stack
- properties:
- context:
- region_name: RegionOne
- template:
- get_file: remote_stack.yaml
-outputs:
- key:
- value: {get_attr: [my_stack, outputs]}
-'''
-
- remote_template = '''
-heat_template_version: 2013-05-23
-resources:
- random1:
- type: OS::Heat::RandomString
-outputs:
- remote_key:
- value: {get_attr: [random1, value]}
-'''
-
- def setUp(self):
- super(RemoteStackTest, self).setUp()
- # replacing the template region with the one from the config
- self.template = self.template.replace('RegionOne',
- self.conf.region)
-
- def test_remote_stack_alone(self):
- stack_id = self.stack_create(template=self.remote_template)
- expected_resources = {'random1': 'OS::Heat::RandomString'}
- self.assertEqual(expected_resources, self.list_resources(stack_id))
- stack = self.client.stacks.get(stack_id)
- output_value = self._stack_output(stack, 'remote_key')
- self.assertEqual(32, len(output_value))
-
- def test_stack_create(self):
- files = {'remote_stack.yaml': self.remote_template}
- stack_id = self.stack_create(files=files)
-
- expected_resources = {'my_stack': 'OS::Heat::Stack'}
- self.assertEqual(expected_resources, self.list_resources(stack_id))
-
- stack = self.client.stacks.get(stack_id)
- output = self._stack_output(stack, 'key')
- parent_output_value = output['remote_key']
- self.assertEqual(32, len(parent_output_value))
-
- rsrc = self.client.resources.get(stack_id, 'my_stack')
- remote_id = rsrc.physical_resource_id
- rstack = self.client.stacks.get(remote_id)
- self.assertEqual(remote_id, rstack.id)
- remote_output_value = self._stack_output(rstack, 'remote_key')
- self.assertEqual(32, len(remote_output_value))
- self.assertEqual(parent_output_value, remote_output_value)
-
- remote_resources = {'random1': 'OS::Heat::RandomString'}
- self.assertEqual(remote_resources, self.list_resources(remote_id))
-
- def test_stack_create_bad_region(self):
- tmpl_bad_region = self.template.replace(self.conf.region, 'DARKHOLE')
- files = {'remote_stack.yaml': self.remote_template}
- kwargs = {
- 'template': tmpl_bad_region,
- 'files': files
- }
- ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
-
- error_msg = ('ERROR: Cannot establish connection to Heat endpoint '
- 'at region "DARKHOLE" due to "publicURL endpoint for '
- 'orchestration service in DARKHOLE region not found"')
- self.assertEqual(error_msg, six.text_type(ex))
-
- def test_stack_resource_validation_fail(self):
- tmpl_bad_format = self.remote_template.replace('resources', 'resource')
- files = {'remote_stack.yaml': tmpl_bad_format}
- kwargs = {'files': files}
- ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create, **kwargs)
-
- error_msg = ('ERROR: Failed validating stack template using Heat '
- 'endpoint at region "%s" due to '
- '"ERROR: The template section is '
- 'invalid: resource"') % self.conf.region
- self.assertEqual(error_msg, six.text_type(ex))
-
- def test_stack_update(self):
- files = {'remote_stack.yaml': self.remote_template}
- stack_id = self.stack_create(files=files)
-
- expected_resources = {'my_stack': 'OS::Heat::Stack'}
- self.assertEqual(expected_resources, self.list_resources(stack_id))
-
- rsrc = self.client.resources.get(stack_id, 'my_stack')
- physical_resource_id = rsrc.physical_resource_id
- rstack = self.client.stacks.get(physical_resource_id)
- self.assertEqual(physical_resource_id, rstack.id)
-
- remote_resources = {'random1': 'OS::Heat::RandomString'}
- self.assertEqual(remote_resources,
- self.list_resources(rstack.id))
- # do an update
- update_template = self.remote_template.replace('random1', 'random2')
- files = {'remote_stack.yaml': update_template}
- self.update_stack(stack_id, self.template, files=files)
-
- # check if the remote stack is still there with the same ID
- self.assertEqual(expected_resources, self.list_resources(stack_id))
- rsrc = self.client.resources.get(stack_id, 'my_stack')
- physical_resource_id = rsrc.physical_resource_id
- rstack = self.client.stacks.get(physical_resource_id)
- self.assertEqual(physical_resource_id, rstack.id)
-
- remote_resources = {'random2': 'OS::Heat::RandomString'}
- self.assertEqual(remote_resources,
- self.list_resources(rstack.id))
-
- def test_stack_suspend_resume(self):
- files = {'remote_stack.yaml': self.remote_template}
- stack_id = self.stack_create(files=files)
- self.stack_suspend(stack_id)
- self.stack_resume(stack_id)
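The `list_resources` helper these tests call can be approximated in a few lines against the raw heatclient API (sketch; `client` is an assumed authenticated handle):

    def list_resources(stack_id):
        # map resource names to their types for simple equality assertions
        return {r.resource_name: r.resource_type
                for r in client.resources.list(stack_id)}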
diff --git a/heat_integrationtests/functional/test_replace_deprecated.py b/heat_integrationtests/functional/test_replace_deprecated.py
index 5e7fdc67e..bbf2e66cc 100644
--- a/heat_integrationtests/functional/test_replace_deprecated.py
+++ b/heat_integrationtests/functional/test_replace_deprecated.py
@@ -69,24 +69,28 @@ properties:
parameters=parms,
template=deployments_template,
enable_cleanup=self.enable_cleanup)
+
expected_resources = {'config': 'OS::Heat::SoftwareConfig',
'dep': 'OS::Heat::SoftwareDeployments',
'server': 'OS::Nova::Server'}
- resource = self.client.resources.get(stack_identifier, 'server')
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
+
+ resource = self.client.resources.get(stack_identifier, 'dep')
initial_phy_id = resource.physical_resource_id
+
resources = deployments_template['resources']
resources['dep'] = yaml.safe_load(self.deployment_group_snippet)
self.update_stack(
stack_identifier,
deployments_template,
parameters=parms)
- resource = self.client.resources.get(stack_identifier, 'server')
- self.assertEqual(initial_phy_id,
- resource.physical_resource_id)
+
expected_new_resources = {'config': 'OS::Heat::SoftwareConfig',
'dep': 'OS::Heat::SoftwareDeploymentGroup',
'server': 'OS::Nova::Server'}
self.assertEqual(expected_new_resources,
self.list_resources(stack_identifier))
+
+ resource = self.client.resources.get(stack_identifier, 'dep')
+ self.assertEqual(initial_phy_id, resource.physical_resource_id)
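The hunk above deliberately moves the physical-id check from 'server' to 'dep': the point of the test is that replacing the deprecated OS::Heat::SoftwareDeployments type with OS::Heat::SoftwareDeploymentGroup must happen in place, so the group's physical_resource_id has to survive the update.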
diff --git a/heat_integrationtests/functional/test_resource_group.py b/heat_integrationtests/functional/test_resource_group.py
index 3f47ca5a3..58802638f 100644
--- a/heat_integrationtests/functional/test_resource_group.py
+++ b/heat_integrationtests/functional/test_resource_group.py
@@ -322,6 +322,37 @@ outputs:
updated_rand = self._stack_output(stack1, 'random1')
self.assertNotEqual(initial_rand, updated_rand)
+ def test_validation(self):
+ resource_group = '''
+heat_template_version: 2016-10-14
+
+parameters:
+ the_count:
+ type: number
+
+resources:
+
+ the_group:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {get_param: the_count}
+ resource_def:
+ type: OS::Heat::RandomString
+'''
+ ret = self.client.stacks.validate(template=resource_group)
+ expected = {'Description': 'No description',
+ 'Environment': {'event_sinks': [],
+ 'parameter_defaults': {},
+ 'parameters': {},
+ 'resource_registry': {u'resources': {}}},
+ 'Parameters': {
+ 'the_count': {'Description': '',
+ 'Label': 'the_count',
+ 'NoEcho': 'false',
+ 'Type': 'Number'}}}
+
+ self.assertEqual(expected, ret)
+
class ResourceGroupTestNullParams(functional_base.FunctionalTestsBase):
template = '''
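A hedged sketch of the validate call added above, with `client` and `resource_group_template` as placeholders; validation resolves parameter schemas without creating any resources:

    ret = client.stacks.validate(template=resource_group_template)
    assert ret['Parameters']['the_count']['Type'] == 'Number'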
diff --git a/heat_integrationtests/functional/test_resources_list.py b/heat_integrationtests/functional/test_resources_list.py
deleted file mode 100644
index f57cf6736..000000000
--- a/heat_integrationtests/functional/test_resources_list.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-test_template_depend = {
- 'heat_template_version': '2013-05-23',
- 'resources': {
- 'test1': {
- 'type': 'OS::Heat::TestResource',
- 'properties': {
- 'value': 'Test1',
- }
- },
- 'test2': {
- 'type': 'OS::Heat::TestResource',
- 'depends_on': ['test1'],
- 'properties': {
- 'value': 'Test2',
- }
- }
- }
-}
-
-
-class ResourcesList(functional_base.FunctionalTestsBase):
-
- def test_filtering_with_depend(self):
- stack_identifier = self.stack_create(template=test_template_depend)
- [test2] = self.client.resources.list(stack_identifier,
- filters={'name': 'test2'})
-
- self.assertEqual('CREATE_COMPLETE', test2.resource_status)
-
- def test_required_by(self):
- stack_identifier = self.stack_create(template=test_template_depend)
- [test1] = self.client.resources.list(stack_identifier,
- filters={'name': 'test1'})
-
- self.assertEqual(['test2'], test1.required_by)
diff --git a/heat_integrationtests/functional/test_software_config.py b/heat_integrationtests/functional/test_software_config.py
deleted file mode 100644
index 8c1cd53f9..000000000
--- a/heat_integrationtests/functional/test_software_config.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import os
-import requests
-import subprocess
-import sys
-import tempfile
-import time
-import yaml
-
-from oslo_utils import timeutils
-
-from heat_integrationtests.common import exceptions
-from heat_integrationtests.common import test
-from heat_integrationtests.functional import functional_base
-
-
-class ParallelDeploymentsTest(functional_base.FunctionalTestsBase):
- server_template = '''
-heat_template_version: "2013-05-23"
-parameters:
- flavor:
- type: string
- image:
- type: string
- network:
- type: string
-resources:
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- user_data_format: SOFTWARE_CONFIG
- networks: [{network: {get_param: network}}]
-outputs:
- server:
- value: {get_resource: server}
-'''
-
- config_template = '''
-heat_template_version: "2013-05-23"
-parameters:
- server:
- type: string
-resources:
- config:
- type: OS::Heat::SoftwareConfig
- properties:
-'''
-
- deployment_snippet = '''
-type: OS::Heat::SoftwareDeployments
-properties:
- config: {get_resource: config}
- servers: {'0': {get_param: server}}
-'''
-
- enable_cleanup = True
-
- def test_deployments_metadata(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'network': self.conf.fixed_network_name,
- 'image': self.conf.minimal_image_ref}
- stack_identifier = self.stack_create(
- parameters=parms,
- template=self.server_template,
- enable_cleanup=self.enable_cleanup)
- server_stack = self.client.stacks.get(stack_identifier)
- server = server_stack.outputs[0]['output_value']
-
- config_stacks = []
- # create 2 stacks with 5 deployments each, then 3 stacks with 3 each
- deploy_count = 0
- deploy_count = self.deploy_many_configs(
- stack_identifier,
- server,
- config_stacks,
- 2,
- 5,
- deploy_count)
- self.deploy_many_configs(
- stack_identifier,
- server,
- config_stacks,
- 3,
- 3,
- deploy_count)
-
- self.signal_deployments(stack_identifier)
- for config_stack in config_stacks:
- self._wait_for_stack_status(config_stack, 'CREATE_COMPLETE')
-
- def test_deployments_timeout_failed(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'network': self.conf.fixed_network_name,
- 'image': self.conf.minimal_image_ref}
- stack_identifier = self.stack_create(
- parameters=parms,
- template=self.server_template,
- enable_cleanup=self.enable_cleanup)
- server_stack = self.client.stacks.get(stack_identifier)
- server = server_stack.outputs[0]['output_value']
- config_stack = self.deploy_config(server, 3, 1)
- self._wait_for_stack_status(config_stack, 'CREATE_FAILED')
- kwargs = {'server_id': server}
-
- def check_deployment_status():
- sd_list = self.client.software_deployments.list(**kwargs)
- for sd in sd_list:
- if sd.status != 'FAILED':
- return False
- return True
-
- self.assertTrue(test.call_until_true(
- 20, 0, check_deployment_status))
-
- def deploy_many_configs(self, stack, server, config_stacks,
- stack_count, deploys_per_stack,
- deploy_count_start):
- for a in range(stack_count):
- config_stacks.append(
- self.deploy_config(server, deploys_per_stack))
-
- new_count = deploy_count_start + stack_count * deploys_per_stack
- self.wait_for_deploy_metadata_set(stack, new_count)
- return new_count
-
- def deploy_config(self, server, deploy_count, timeout=None):
- parms = {'server': server}
- template = yaml.safe_load(self.config_template)
- resources = template['resources']
- resources['config']['properties'] = {'config': 'x' * 10000}
- for a in range(deploy_count):
- resources['dep_%s' % a] = yaml.safe_load(self.deployment_snippet)
- return self.stack_create(
- parameters=parms,
- template=template,
- enable_cleanup=self.enable_cleanup,
- expected_status=None,
- timeout=timeout)
-
- def wait_for_deploy_metadata_set(self, stack, deploy_count):
- build_timeout = self.conf.build_timeout
- build_interval = self.conf.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- server_metadata = self.client.resources.metadata(
- stack, 'server')
- if len(server_metadata['deployments']) == deploy_count:
- return
- time.sleep(build_interval)
-
- message = ('Deployment resources failed to be created within '
- 'the required time (%s s).' %
- (build_timeout))
- raise exceptions.TimeoutException(message)
-
- def signal_deployments(self, stack_identifier):
- server_metadata = self.client.resources.metadata(
- stack_identifier, 'server')
- for dep in server_metadata['deployments']:
- iv = dict((i['name'], i['value']) for i in dep['inputs'])
- sigurl = iv.get('deploy_signal_id')
- requests.post(sigurl, data='{}',
- headers={'content-type': 'application/json'},
- verify=self.verify_cert)
-
-
-class ZaqarSignalTransportTest(functional_base.FunctionalTestsBase):
- server_template = '''
-heat_template_version: "2013-05-23"
-
-parameters:
- flavor:
- type: string
- image:
- type: string
- network:
- type: string
-
-resources:
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- user_data_format: SOFTWARE_CONFIG
- software_config_transport: ZAQAR_MESSAGE
- networks: [{network: {get_param: network}}]
- config:
- type: OS::Heat::SoftwareConfig
- properties:
- config: echo 'foo'
- deployment:
- type: OS::Heat::SoftwareDeployment
- properties:
- config: {get_resource: config}
- server: {get_resource: server}
- signal_transport: ZAQAR_SIGNAL
-
-outputs:
- data:
- value: {get_attr: [deployment, deploy_stdout]}
-'''
-
- conf_template = '''
-[zaqar]
-user_id = %(user_id)s
-password = %(password)s
-project_id = %(project_id)s
-auth_url = %(auth_url)s
-queue_id = %(queue_id)s
- '''
-
- def test_signal_queues(self):
- parms = {'flavor': self.conf.minimal_instance_type,
- 'network': self.conf.fixed_network_name,
- 'image': self.conf.minimal_image_ref}
- stack_identifier = self.stack_create(
- parameters=parms,
- template=self.server_template,
- expected_status=None)
- metadata = self.wait_for_deploy_metadata_set(stack_identifier)
- config = metadata['os-collect-config']['zaqar']
- conf_content = self.conf_template % config
- fd, temp_path = tempfile.mkstemp()
- os.write(fd, conf_content.encode('utf-8'))
- os.close(fd)
- cmd = ['os-collect-config', '--one-time',
- '--config-file=%s' % temp_path, 'zaqar']
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- stdout_value = proc.communicate()[0]
- data = json.loads(stdout_value.decode('utf-8'))
- self.assertEqual(config, data['zaqar']['os-collect-config']['zaqar'])
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- stdout_value = proc.communicate()[0]
- data = json.loads(stdout_value.decode('utf-8'))
-
- fd, temp_path = tempfile.mkstemp()
- os.write(fd,
- json.dumps(data['zaqar']['deployments'][0]).encode('utf-8'))
- os.close(fd)
- cmd = [sys.executable, self.conf.heat_config_notify_script, temp_path]
- proc = subprocess.Popen(cmd,
- stderr=subprocess.PIPE,
- stdin=subprocess.PIPE)
- proc.communicate(
- json.dumps({'deploy_stdout': 'here!'}).encode('utf-8'))
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- stack = self.client.stacks.get(stack_identifier)
- self.assertEqual('here!', stack.outputs[0]['output_value'])
-
- def wait_for_deploy_metadata_set(self, stack):
- build_timeout = self.conf.build_timeout
- build_interval = self.conf.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- server_metadata = self.client.resources.metadata(
- stack, 'server')
- if server_metadata.get('deployments'):
- return server_metadata
- time.sleep(build_interval)
-
- message = ('Deployment resources failed to be created within '
- 'the required time (%s s).' %
- (build_timeout))
- raise exceptions.TimeoutException(message)
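The Zaqar test signals its deployment by piping JSON into the heat-config-notify script, exactly as the deleted code above does. A compressed sketch of that step, with `notify_script` and `deployment_json_path` as placeholders:

    import json
    import subprocess
    import sys

    proc = subprocess.Popen([sys.executable, notify_script,
                             deployment_json_path],
                            stdin=subprocess.PIPE)
    proc.communicate(json.dumps({'deploy_stdout': 'here!'}).encode('utf-8'))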
diff --git a/heat_integrationtests/functional/test_stack_events.py b/heat_integrationtests/functional/test_stack_events.py
deleted file mode 100644
index d5a7fada9..000000000
--- a/heat_integrationtests/functional/test_stack_events.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class StackEventsTest(functional_base.FunctionalTestsBase):
-
- template = '''
-heat_template_version: 2014-10-16
-parameters:
-resources:
- test_resource:
- type: OS::Heat::TestResource
- properties:
- value: 'test1'
- fail: False
- update_replace: False
- wait_secs: 0
-outputs:
- resource_id:
- description: 'ID of resource'
- value: { get_resource: test_resource }
-'''
-
- def _verify_event_fields(self, event, event_characteristics):
- self.assertIsNotNone(event_characteristics)
- self.assertIsNotNone(event.event_time)
- self.assertIsNotNone(event.links)
- self.assertIsNotNone(event.logical_resource_id)
- self.assertIsNotNone(event.resource_status)
- self.assertIn(event.resource_status, event_characteristics[1])
- self.assertIsNotNone(event.resource_status_reason)
- self.assertIsNotNone(event.id)
-
- def test_event(self):
- parameters = {}
-
- test_stack_name = self._stack_rand_name()
- stack_identifier = self.stack_create(
- stack_name=test_stack_name,
- template=self.template,
- parameters=parameters
- )
-
- expected_status = ['CREATE_IN_PROGRESS', 'CREATE_COMPLETE']
- event_characteristics = {
- test_stack_name: ('OS::Heat::Stack', expected_status),
- 'test_resource': ('OS::Heat::TestResource', expected_status)}
-
- # List stack events
- # API: GET /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/events
- stack_events = self.client.events.list(stack_identifier)
-
- for stack_event in stack_events:
- # Key on an expected/valid resource name
- self._verify_event_fields(
- stack_event,
- event_characteristics[stack_event.resource_name])
-
- # Test the event filtering API based on this resource_name
- # /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/events
- resource_events = self.client.events.list(
- stack_identifier,
- stack_event.resource_name)
-
- # Resource events are a subset of the original stack event list
- self.assertLess(len(resource_events), len(stack_events))
-
- # Get the event details for each resource event
- for resource_event in resource_events:
- # A resource_event should be in the original stack event list
- self.assertIn(resource_event, stack_events)
- # Given a filtered list, the resource names should be identical
- self.assertEqual(
- resource_event.resource_name,
- stack_event.resource_name)
- # Verify all fields, keying off the resource_name
- self._verify_event_fields(
- resource_event,
- event_characteristics[resource_event.resource_name])
-
- # Exercise the event details API
- # /v1/{tenant_id}/stacks/{stack_name}/{stack_id}/resources/{resource_name}/events/{event_id}
- event_details = self.client.events.get(
- stack_identifier,
- resource_event.resource_name,
- resource_event.id)
- self._verify_event_fields(
- event_details,
- event_characteristics[event_details.resource_name])
- # The names should be identical to the non-detailed event
- self.assertEqual(
- resource_event.resource_name,
- event_details.resource_name)
- # Verify the extra field in the detail results
- self.assertIsNotNone(event_details.resource_type)
- self.assertEqual(
- event_characteristics[event_details.resource_name][0],
- event_details.resource_type)
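The three event calls exercised above map directly onto heatclient (sketch; `client` and `stack_id` are placeholders):

    all_events = client.events.list(stack_id)                   # whole stack
    res_events = client.events.list(stack_id, 'test_resource')  # one resource
    detail = client.events.get(stack_id, 'test_resource',
                               res_events[0].id)                # single event
    print(detail.resource_type)  # only the detailed view carries the type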
diff --git a/heat_integrationtests/functional/test_stack_outputs.py b/heat_integrationtests/functional/test_stack_outputs.py
deleted file mode 100644
index b7d7cd643..000000000
--- a/heat_integrationtests/functional/test_stack_outputs.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class StackOutputsTest(functional_base.FunctionalTestsBase):
-
- template = '''
-heat_template_version: 2015-10-15
-resources:
- test_resource_a:
- type: OS::Heat::TestResource
- properties:
- value: 'a'
- test_resource_b:
- type: OS::Heat::TestResource
- properties:
- value: 'b'
-outputs:
- resource_output_a:
- description: 'Output of resource a'
- value: { get_attr: [test_resource_a, output] }
- resource_output_b:
- description: 'Output of resource b'
- value: { get_attr: [test_resource_b, output] }
-'''
-
- def test_outputs(self):
- stack_identifier = self.stack_create(
- template=self.template
- )
- expected_list = [{u'output_key': u'resource_output_a',
- u'description': u'Output of resource a'},
- {u'output_key': u'resource_output_b',
- u'description': u'Output of resource b'}]
-
- actual_list = self.client.stacks.output_list(
- stack_identifier)['outputs']
- sorted_actual_list = sorted(actual_list, key=lambda x: x['output_key'])
- self.assertEqual(expected_list, sorted_actual_list)
-
- expected_output_a = {
- u'output_value': u'a', u'output_key': u'resource_output_a',
- u'description': u'Output of resource a'}
- expected_output_b = {
- u'output_value': u'b', u'output_key': u'resource_output_b',
- u'description': u'Output of resource b'}
- actual_output_a = self.client.stacks.output_show(
- stack_identifier, 'resource_output_a')['output']
- actual_output_b = self.client.stacks.output_show(
- stack_identifier, 'resource_output_b')['output']
- self.assertEqual(expected_output_a, actual_output_a)
- self.assertEqual(expected_output_b, actual_output_b)
-
- before_template = '''
-heat_template_version: 2015-10-15
-resources:
- test_resource_a:
- type: OS::Heat::TestResource
- properties:
- value: 'foo'
-outputs:
-'''
-
- after_template = '''
-heat_template_version: 2015-10-15
-resources:
- test_resource_a:
- type: OS::Heat::TestResource
- properties:
- value: 'foo'
- test_resource_b:
- type: OS::Heat::TestResource
- properties:
- value: {get_attr: [test_resource_a, output]}
-outputs:
- output_value:
- description: 'Output of resource b'
- value: {get_attr: [test_resource_b, output]}
-'''
-
- def test_outputs_update_new_resource(self):
- stack_identifier = self.stack_create(template=self.before_template)
- self.update_stack(stack_identifier, template=self.after_template)
-
- expected_output_value = {
- u'output_value': u'foo', u'output_key': u'output_value',
- u'description': u'Output of resource b'}
- actual_output_value = self.client.stacks.output_show(
- stack_identifier, 'output_value')['output']
- self.assertEqual(expected_output_value, actual_output_value)
-
- nested_template = '''
-heat_template_version: 2015-10-15
-resources:
- parent:
- type: 1.yaml
-outputs:
- resource_output_a:
- value: { get_attr: [parent, resource_output_a] }
- description: 'parent a'
- resource_output_b:
- value: { get_attr: [parent, resource_output_b] }
- description: 'parent b'
- '''
- error_template = '''
-heat_template_version: 2015-10-15
-resources:
- test_resource_a:
- type: OS::Heat::TestResource
- properties:
- value: 'a'
- test_resource_b:
- type: OS::Heat::TestResource
- properties:
- value: 'b'
-outputs:
- resource_output_a:
- description: 'Output of resource a'
- value: { get_attr: [test_resource_a, output] }
- resource_output_b:
- description: 'Output of resource b'
- value: { get_param: foo }
-'''
-
- def test_output_error_nested(self):
- stack_identifier = self.stack_create(
- template=self.nested_template,
- files={'1.yaml': self.error_template}
- )
- self.update_stack(stack_identifier, template=self.nested_template,
- files={'1.yaml': self.error_template})
- expected_list = [{u'output_key': u'resource_output_a',
- u'output_value': u'a',
- u'description': u'parent a'},
- {u'output_key': u'resource_output_b',
- u'output_value': None,
- u'output_error': u'Error in parent output '
- u'resource_output_b: The Parameter'
- u' (foo) was not provided.',
- u'description': u'parent b'}]
-
- actual_list = self.client.stacks.get(stack_identifier).outputs
- sorted_actual_list = sorted(actual_list, key=lambda x: x['output_key'])
- self.assertEqual(expected_list, sorted_actual_list)
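Sketch of the two output endpoints the deleted test covers (placeholders `client` and `stack_id`); output_list returns only keys and descriptions, while output_show also resolves the value:

    outs = client.stacks.output_list(stack_id)['outputs']
    one = client.stacks.output_show(stack_id, 'resource_output_a')['output']
    print(one['output_value'])  # 'a'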
diff --git a/heat_integrationtests/functional/test_stack_tags.py b/heat_integrationtests/functional/test_stack_tags.py
deleted file mode 100644
index 4a9779837..000000000
--- a/heat_integrationtests/functional/test_stack_tags.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class StackTagTest(functional_base.FunctionalTestsBase):
-
- template = '''
-heat_template_version: 2014-10-16
-description:
- foo
-parameters:
- input:
- type: string
- default: test
-resources:
- not-used:
- type: OS::Heat::TestResource
- properties:
- wait_secs: 1
- value: {get_param: input}
-'''
-
- def test_stack_tag(self):
- # Stack create with stack tags
- tags = 'foo,bar'
- stack_identifier = self.stack_create(
- template=self.template,
- tags=tags
- )
-
- # Ensure property tag is populated and matches given tags
- stack = self.client.stacks.get(stack_identifier)
- self.assertEqual(['foo', 'bar'], stack.tags)
-
- # Update tags
- updated_tags = 'tag1,tag2'
- self.update_stack(
- stack_identifier,
- template=self.template,
- tags=updated_tags,
- parameters={'input': 'next'})
-
- # Ensure property tag is populated and matches updated tags
- updated_stack = self.client.stacks.get(stack_identifier)
- self.assertEqual(['tag1', 'tag2'], updated_stack.tags)
-
- # Delete tags
- self.update_stack(
- stack_identifier,
- template=self.template,
- parameters={'input': 'none'}
- )
-
- # Ensure property tag is not populated
- empty_tags_stack = self.client.stacks.get(stack_identifier)
- self.assertIsNone(empty_tags_stack.tags)
-
- def test_hidden_stack(self):
- # Stack create with hidden stack tag
- tags = 'foo,hidden'
- self.stack_create(
- template=self.template,
- tags=tags)
- # Ensure the hidden stack is not visible in the stack list
- for stack in self.client.stacks.list():
- self.assertNotIn('hidden', stack.tags, "Hidden stack can be seen")
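Sketch of the tag round-trip the deleted test covers, assuming `client` and `tmpl` placeholders and that stacks.create returns the usual body carrying the new stack's id; tags go in as a comma-separated string and come back as a list:

    body = client.stacks.create(stack_name='tagged', template=tmpl,
                                tags='foo,bar')
    stack = client.stacks.get(body['stack']['id'])
    assert stack.tags == ['foo', 'bar']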
diff --git a/heat_integrationtests/functional/test_template_validate.py b/heat_integrationtests/functional/test_template_validate.py
deleted file mode 100644
index 451de49c5..000000000
--- a/heat_integrationtests/functional/test_template_validate.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import six
-
-from heatclient import exc
-
-from heat_integrationtests.functional import functional_base
-
-
-class StackTemplateValidateTest(functional_base.FunctionalTestsBase):
-
- random_template = '''
-heat_template_version: 2014-10-16
-description: the stack description
-parameters:
- aparam:
- type: number
- default: 10
- description: the param description
-resources:
- myres:
- type: OS::Heat::RandomString
- properties:
- length: {get_param: aparam}
-'''
-
- parent_template = '''
-heat_template_version: 2014-10-16
-description: the parent template
-parameters:
- pparam:
- type: number
- default: 5
- description: the param description
-resources:
- nres:
- type: mynested.yaml
- properties:
- aparam: {get_param: pparam}
-'''
-
- parent_template_noprop = '''
-heat_template_version: 2014-10-16
-description: the parent template
-resources:
- nres:
- type: mynested.yaml
-'''
-
- random_template_groups = '''
-heat_template_version: 2014-10-16
-description: the stack description
-parameters:
- aparam:
- type: number
- default: 10
- description: the param description
- bparam:
- type: string
- default: foo
- cparam:
- type: string
- default: secret
- hidden: true
-parameter_groups:
-- label: str_params
- description: The string params
- parameters:
- - bparam
- - cparam
-resources:
- myres:
- type: OS::Heat::RandomString
- properties:
- length: {get_param: aparam}
-'''
-
- def test_template_validate_basic(self):
- ret = self.client.stacks.validate(template=self.random_template)
- expected = {'Description': 'the stack description',
- 'Parameters': {
- 'aparam': {'Default': 10,
- 'Description': 'the param description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_override_default(self):
- env = {'parameters': {'aparam': 5}}
- ret = self.client.stacks.validate(template=self.random_template,
- environment=env)
- expected = {'Description': 'the stack description',
- 'Parameters': {
- 'aparam': {'Default': 10,
- 'Value': 5,
- 'Description': 'the param description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {'aparam': 5},
- 'resource_registry': {u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_override_none(self):
- env = {'resource_registry': {
- 'OS::Heat::RandomString': 'OS::Heat::None'}}
- ret = self.client.stacks.validate(template=self.random_template,
- environment=env)
- expected = {'Description': 'the stack description',
- 'Parameters': {
- 'aparam': {'Default': 10,
- 'Description': 'the param description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {
- 'OS::Heat::RandomString': 'OS::Heat::None',
- u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_basic_required_param(self):
- tmpl = self.random_template.replace('default: 10', '')
- ret = self.client.stacks.validate(template=tmpl)
- expected = {'Description': 'the stack description',
- 'Parameters': {
- 'aparam': {'Description': 'the param description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_fail_version(self):
- fail_template = self.random_template.replace('2014-10-16', 'invalid')
- ex = self.assertRaises(exc.HTTPBadRequest,
- self.client.stacks.validate,
- template=fail_template)
- self.assertIn('The template version is invalid', six.text_type(ex))
-
- def test_template_validate_parameter_groups(self):
- ret = self.client.stacks.validate(template=self.random_template_groups)
- expected = {'Description': 'the stack description',
- 'ParameterGroups':
- [{'description': 'The string params',
- 'label': 'str_params',
- 'parameters': ['bparam', 'cparam']}],
- 'Parameters':
- {'aparam':
- {'Default': 10,
- 'Description': 'the param description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'},
- 'bparam':
- {'Default': 'foo',
- 'Description': '',
- 'Label': 'bparam',
- 'NoEcho': 'false',
- 'Type': 'String'},
- 'cparam':
- {'Default': 'secret',
- 'Description': '',
- 'Label': 'cparam',
- 'NoEcho': 'true',
- 'Type': 'String'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_nested_off(self):
- files = {'mynested.yaml': self.random_template}
- ret = self.client.stacks.validate(template=self.parent_template,
- files=files)
- expected = {'Description': 'the parent template',
- 'Parameters': {
- 'pparam': {'Default': 5,
- 'Description': 'the param description',
- 'Label': 'pparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {
- u'mynested.yaml': u'mynested.yaml',
- u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_nested_on(self):
- files = {'mynested.yaml': self.random_template}
- ret = self.client.stacks.validate(template=self.parent_template_noprop,
- files=files,
- show_nested=True)
- expected = {'Description': 'the parent template',
- 'Parameters': {},
- 'NestedParameters': {
- 'nres': {'Description': 'the stack description',
- 'Parameters': {'aparam': {'Default': 10,
- 'Description':
- 'the param '
- 'description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Type': 'mynested.yaml'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {
- u'mynested.yaml': u'mynested.yaml',
- u'resources': {}}}}
- self.assertEqual(expected, ret)
-
- def test_template_validate_nested_on_multiple(self):
- # parent_template -> nested_template -> random_template
- nested_template = self.random_template.replace(
- 'OS::Heat::RandomString', 'mynested2.yaml')
- files = {'mynested.yaml': nested_template,
- 'mynested2.yaml': self.random_template}
- ret = self.client.stacks.validate(template=self.parent_template,
- files=files,
- show_nested=True)
-
- n_param2 = {'myres': {'Description': 'the stack description',
- 'Parameters': {'aparam': {'Default': 10,
- 'Description':
- 'the param '
- 'description',
- 'Label': 'aparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'Type': 'mynested2.yaml'}}
- expected = {'Description': 'the parent template',
- 'Parameters': {
- 'pparam': {'Default': 5,
- 'Description': 'the param description',
- 'Label': 'pparam',
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'NestedParameters': {
- 'nres': {'Description': 'the stack description',
- 'Parameters': {'aparam': {'Default': 10,
- 'Description':
- 'the param '
- 'description',
- 'Label': 'aparam',
- 'Value': 5,
- 'NoEcho': 'false',
- 'Type': 'Number'}},
- 'NestedParameters': n_param2,
- 'Type': 'mynested.yaml'}},
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {
- u'mynested.yaml': u'mynested.yaml',
- 'resources': {}}}}
- self.assertEqual(expected, ret)
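The nested-validation tests removed above exercise a specific call shape in python-heatclient: the parent template refers to a resource type that is really a file name, the files dict supplies that file's body, and show_nested=True asks the validate API to expand NestedParameters level by level. A minimal sketch of that call, assuming client is an authenticated heat client like the one the tests get from functional_base (the template bodies here are illustrative stand-ins, not the removed fixtures):

    parent_template = '''
    heat_template_version: 2014-10-16
    description: the parent template
    resources:
      nres:
        type: mynested.yaml
    '''
    nested_template = '''
    heat_template_version: 2014-10-16
    description: the stack description
    resources:
      myres:
        type: OS::Heat::RandomString
    '''
    # client: an authenticated heatclient Client, as in the test base class
    result = client.stacks.validate(
        template=parent_template,
        files={'mynested.yaml': nested_template},
        show_nested=True)  # the result then carries 'NestedParameters'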
diff --git a/heat_integrationtests/functional/test_template_versions.py b/heat_integrationtests/functional/test_template_versions.py
new file mode 100644
index 000000000..2a268f0e6
--- /dev/null
+++ b/heat_integrationtests/functional/test_template_versions.py
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from heat_integrationtests.functional import functional_base
+
+
+class TemplateVersionTest(functional_base.FunctionalTestsBase):
+ """This will test list template versions"""
+
+ def test_template_version(self):
+ template_versions = self.client.template_versions.list()
+ supported_template_versions = ["2013-05-23", "2014-10-16",
+ "2015-04-30", "2015-10-15",
+ "2012-12-12", "2010-09-09",
+ "2016-04-08", "2016-10-14", "newton",
+ "2017-02-24", "ocata",
+ "2017-09-01", "pike",
+ "2018-03-02", "queens"]
+ for template in template_versions:
+ self.assertIn(template.version.split(".")[1],
+ supported_template_versions)
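The new test depends on template-version identifiers having the shape <prefix>.<version>, for example heat_template_version.2018-03-02 or HeatTemplateFormatVersion.2012-12-12, so split(".")[1] isolates the date or release-name component being checked. A small sketch of that parsing; the literal strings below are assumptions in that documented shape, not captured API output:

    versions = [
        'heat_template_version.2018-03-02',
        'heat_template_version.queens',
        'HeatTemplateFormatVersion.2012-12-12',
    ]
    supported = {'2018-03-02', 'queens', '2012-12-12'}
    for v in versions:
        # split('.')[1] keeps only the part the test compares
        assert v.split('.')[1] in supported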
diff --git a/heat_integrationtests/functional/test_templates.py b/heat_integrationtests/functional/test_templates.py
deleted file mode 100644
index 398af5ed5..000000000
--- a/heat_integrationtests/functional/test_templates.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from heat_integrationtests.functional import functional_base
-
-
-class TemplateAPITest(functional_base.FunctionalTestsBase):
- """This will test the following template calls:
-
- 1. Get the template content for the specific stack
- 2. List template versions
- 3. List resource types
- 4. Show resource details for OS::Heat::TestResource
- """
-
- template = {
- 'heat_template_version': '2014-10-16',
- 'description': 'Test Template APIs',
- 'resources': {
- 'test1': {
- 'type': 'OS::Heat::TestResource',
- 'properties': {
- 'update_replace': False,
- 'wait_secs': 0,
- 'value': 'Test1',
- 'fail': False,
- }
- }
- }
- }
-
- def test_get_stack_template(self):
- stack_identifier = self.stack_create(
- template=self.template
- )
- template_from_client = self.client.stacks.template(stack_identifier)
- self.assertEqual(self.template, template_from_client)
-
- def test_template_version(self):
- template_versions = self.client.template_versions.list()
- supported_template_versions = ["2013-05-23", "2014-10-16",
- "2015-04-30", "2015-10-15",
- "2012-12-12", "2010-09-09",
- "2016-04-08", "2016-10-14", "newton",
- "2017-02-24", "ocata",
- "2017-09-01", "pike",
- "2018-03-02", "queens"]
- for template in template_versions:
- self.assertIn(template.version.split(".")[1],
- supported_template_versions)
-
- def test_resource_types(self):
- resource_types = self.client.resource_types.list()
- self.assertTrue(any(resource.resource_type == "OS::Heat::TestResource"
- for resource in resource_types))
-
- def test_show_resource_template(self):
- resource_details = self.client.resource_types.get(
- resource_type="OS::Heat::TestResource"
- )
- self.assertEqual("OS::Heat::TestResource",
- resource_details['resource_type'])
diff --git a/heat_integrationtests/functional/test_unicode_template.py b/heat_integrationtests/functional/test_unicode_template.py
deleted file mode 100644
index d5776a409..000000000
--- a/heat_integrationtests/functional/test_unicode_template.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.functional import functional_base
-
-
-class StackUnicodeTemplateTest(functional_base.FunctionalTestsBase):
-
- random_template = u'''
-heat_template_version: 2014-10-16
-description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
-parameters:
- \u53c2\u6570:
- type: number
- default: 10
- label: \u6807\u7b7e
- description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
-resources:
- \u8d44\u6e90:
- type: OS::Heat::RandomString
- properties:
- length: {get_param: \u53c2\u6570}
-outputs:
- \u8f93\u51fa:
- description: \u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0
- value: {get_attr: [\u8d44\u6e90, value]}
-'''
-
- def _assert_results(self, result):
- self.assertTrue(result['disable_rollback'])
- self.assertIsNone(result['parent'])
- self.assertEqual(u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- result['template_description'])
- self.assertEqual(u'10', result['parameters'][u'\u53c2\u6570'])
-
- def _assert_preview_results(self, result):
- self._assert_results(result)
- res = result['resources'][0]
- self.assertEqual('/resources/%s' % res['resource_name'],
- res['resource_identity']['path'])
-
- def _assert_create_results(self, result):
- self._assert_results(result)
- output = result['outputs'][0]
- self.assertEqual(u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- output['description'])
- self.assertEqual(u'\u8f93\u51fa', output['output_key'])
- self.assertIsNotNone(output['output_value'])
-
- def _assert_resource_results(self, result):
- self.assertEqual(u'\u8d44\u6e90', result['resource_name'])
- self.assertEqual('OS::Heat::RandomString',
- result['resource_type'])
-
- def test_template_validate_basic(self):
- ret = self.client.stacks.validate(template=self.random_template)
- expected = {
- 'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- 'Parameters': {
- u'\u53c2\u6570': {
- 'Default': 10,
- 'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- 'Label': u'\u6807\u7b7e',
- 'NoEcho': 'false',
- 'Type': 'Number'}
- },
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {},
- 'resource_registry': {u'resources': {}}
- }
- }
- self.assertEqual(expected, ret)
-
- def test_template_validate_override_default(self):
- env = {'parameters': {u'\u53c2\u6570': 5}}
- ret = self.client.stacks.validate(template=self.random_template,
- environment=env)
- expected = {
- 'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- 'Parameters': {
- u'\u53c2\u6570': {
- 'Default': 10,
- 'Value': 5,
- 'Description': u'\u8fd9\u662f\u4e00\u4e2a\u63cf\u8ff0',
- 'Label': u'\u6807\u7b7e',
- 'NoEcho': 'false',
- 'Type': 'Number'}
- },
- 'Environment': {
- 'event_sinks': [],
- 'parameter_defaults': {},
- 'parameters': {u'\u53c2\u6570': 5},
- 'resource_registry': {u'resources': {}}
- }
- }
- self.assertEqual(expected, ret)
-
- def test_stack_preview(self):
- result = self.client.stacks.preview(
- template=self.random_template,
- stack_name=self._stack_rand_name(),
- disable_rollback=True).to_dict()
- self._assert_preview_results(result)
-
- def test_create_stack(self):
- stack_identifier = self.stack_create(template=self.random_template)
- stack = self.client.stacks.get(stack_identifier)
- self._assert_create_results(stack.to_dict())
- rl = self.client.resources.list(stack_identifier)
- self.assertEqual(1, len(rl))
- self._assert_resource_results(rl[0].to_dict())
diff --git a/heat_integrationtests/functional/test_waitcondition.py b/heat_integrationtests/functional/test_waitcondition.py
deleted file mode 100644
index f79d49bed..000000000
--- a/heat_integrationtests/functional/test_waitcondition.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from keystoneclient.v3 import client as keystoneclient
-from zaqarclient.queues.v2 import client as zaqarclient
-
-from heat_integrationtests.functional import functional_base
-
-
-class ZaqarWaitConditionTest(functional_base.FunctionalTestsBase):
- template = '''
-heat_template_version: "2013-05-23"
-
-resources:
- wait_condition:
- type: OS::Heat::WaitCondition
- properties:
- handle: {get_resource: wait_handle}
- timeout: 120
- wait_handle:
- type: OS::Heat::WaitConditionHandle
- properties:
- signal_transport: ZAQAR_SIGNAL
-
-outputs:
- wait_data:
- value: {'Fn::Select': ['data_id', {get_attr: [wait_condition, data]}]}
-'''
-
- def test_signal_queues(self):
- stack_identifier = self.stack_create(
- template=self.template,
- expected_status=None)
- self._wait_for_resource_status(stack_identifier, 'wait_handle',
- 'CREATE_COMPLETE')
- resource = self.client.resources.get(stack_identifier, 'wait_handle')
- signal = json.loads(resource.attributes['signal'])
- ks = keystoneclient.Client(
- auth_url=signal['auth_url'],
- user_id=signal['user_id'],
- password=signal['password'],
- project_id=signal['project_id'])
- endpoint = ks.service_catalog.url_for(
- service_type='messaging', endpoint_type='publicURL')
- conf = {
- 'auth_opts': {
- 'backend': 'keystone',
- 'options': {
- 'os_auth_token': ks.auth_token,
- 'os_project_id': signal['project_id']
- }
- }
- }
-
- zaqar = zaqarclient.Client(endpoint, conf=conf)
-
- queue = zaqar.queue(signal['queue_id'])
- queue.post({'body': {'data': 'here!', 'id': 'data_id'}, 'ttl': 600})
- self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- stack = self.client.stacks.get(stack_identifier)
- self.assertEqual('here!', stack.outputs[0]['output_value'])
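The removed Zaqar test doubles as documentation of the ZAQAR_SIGNAL round trip: the wait handle's signal attribute is a JSON blob carrying credentials and a queue_id, and posting a message whose body contains an id matching the Fn::Select key completes the wait condition. A condensed sketch of just the final signalling step, assuming endpoint, conf and signal were built exactly as in the deleted code above:

    from zaqarclient.queues.v2 import client as zaqarclient

    def signal_wait_condition(endpoint, conf, signal):
        # signal is json.loads(resource.attributes['signal']) taken from an
        # OS::Heat::WaitConditionHandle with signal_transport ZAQAR_SIGNAL
        zaqar = zaqarclient.Client(endpoint, conf=conf)
        queue = zaqar.queue(signal['queue_id'])
        # the 'id' value must match the Fn::Select key in the stack outputs
        queue.post({'body': {'data': 'here!', 'id': 'data_id'}, 'ttl': 600})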
diff --git a/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po b/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po
index f250c529b..64cd09475 100644
--- a/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po
+++ b/heat_integrationtests/locale/en_GB/LC_MESSAGES/heat_integrationtests.po
@@ -1,17 +1,17 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 10.0.0.dev107\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-10-06 21:39+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-10-06 07:42+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
-"Language: en-GB\n"
-"X-Generator: Zanata 3.9.6\n"
+"Language: en_GB\n"
+"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "Please specify version in auth_url or auth_version in config."
diff --git a/heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po b/heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po
index aa46ed1a2..57353ed64 100644
--- a/heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po
+++ b/heat_integrationtests/locale/ko_KR/LC_MESSAGES/heat_integrationtests.po
@@ -1,17 +1,17 @@
# minwook-shin <minwook0106@gmail.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat 9.0.0.0b4.dev55\n"
+"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2017-08-12 12:09+0000\n"
+"POT-Creation-Date: 2018-02-28 16:10+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-08-05 01:47+0000\n"
"Last-Translator: minwook-shin <minwook0106@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
-"Language: ko-KR\n"
-"X-Generator: Zanata 3.9.6\n"
+"Language: ko_KR\n"
+"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
msgid "Please specify version in auth_url or auth_version in config."
diff --git a/heat_integrationtests/plugin.py b/heat_integrationtests/plugin.py
deleted file mode 100644
index 991b31e19..000000000
--- a/heat_integrationtests/plugin.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-
-from tempest import config
-from tempest.test_discover import plugins
-
-from heat_integrationtests.common import config as heat_config
-
-
-class HeatTempestPlugin(plugins.TempestPlugin):
- def load_tests(self):
- base_path = os.path.split(os.path.dirname(
- os.path.abspath(__file__)))[0]
- test_dir = "heat_integrationtests"
- full_test_dir = os.path.join(base_path, test_dir)
- return full_test_dir, base_path
-
- def register_opts(self, conf):
- config.register_opt_group(conf, heat_config.service_available_group,
- heat_config.ServiceAvailableGroup)
- config.register_opt_group(conf, heat_config.heat_group,
- heat_config.HeatGroup)
- heat_config.CONF = config.CONF
-
- def get_opt_lists(self):
- return [(heat_config.heat_group.name,
- heat_config.HeatGroup)]
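Removing plugin.py works because tempest discovers test plugins through the tempest.test_plugins setuptools entry point, and an equivalent registration presumably lives in the external heat-tempest-plugin package now targeted by the jobs (the group_regex change below already points at heat_tempest_plugin.tests.api). A rough sketch of that discovery mechanism; the entry-point name heat_tests is a reconstruction, not copied from this diff:

    # setup.cfg registration for a tempest plugin looks roughly like:
    #
    #   [entry_points]
    #   tempest.test_plugins =
    #       heat_tests = heat_integrationtests.plugin:HeatTempestPlugin
    #
    # and tempest loads every registered plugin at startup:
    import pkg_resources

    for ep in pkg_resources.iter_entry_points('tempest.test_plugins'):
        plugin_cls = ep.load()  # e.g. the HeatTempestPlugin class above
        print(ep.name, plugin_cls)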
diff --git a/heat_integrationtests/post_test_hook.sh b/heat_integrationtests/post_test_hook.sh
index 51c76a0df..80e017937 100755
--- a/heat_integrationtests/post_test_hook.sh
+++ b/heat_integrationtests/post_test_hook.sh
@@ -21,7 +21,6 @@ sudo -E $DEST/heat/heat_integrationtests/prepare_test_env.sh
sudo -E $DEST/heat/heat_integrationtests/prepare_test_network.sh
cd $DEST/tempest
-sudo sed -i -e '/group_regex/c\group_regex=heat_integrationtests\\.api\\.test_heat_api(?:\\.|_)([^_]+)' .testr.conf
-sudo tempest run --regex heat_integrationtests
+sudo tox -evenv-tempest -- stestr --test-path=$DEST/heat/heat_integrationtests --top-dir=$DEST/heat --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' run
sudo -E $DEST/heat/heat_integrationtests/cleanup_test_env.sh
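The rewritten hook passes stestr a --group_regex so that test IDs producing the same regex match are scheduled onto the same worker, presumably to keep each ordered API scenario together. A quick illustration of the grouping key using Python's re module; the test IDs below are made-up examples, not real IDs from the suite:

    import re

    # the same pattern passed to stestr above, with shell quoting removed
    pattern = re.compile(
        r'heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)')

    ids = [
        'heat_tempest_plugin.tests.api.test_heat_api_stacks.test_create',
        'heat_tempest_plugin.tests.api.test_heat_api_stacks.test_delete',
        'heat_tempest_plugin.tests.api.test_heat_api_resources.test_list',
    ]
    for test_id in ids:
        m = pattern.match(test_id)
        # IDs sharing the same matched section land on the same worker
        print(test_id, '->', m.group(0) if m else None)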
diff --git a/heat_integrationtests/pre_test_hook.sh b/heat_integrationtests/pre_test_hook.sh
index fd869ebcc..b9b53c8a1 100755
--- a/heat_integrationtests/pre_test_hook.sh
+++ b/heat_integrationtests/pre_test_hook.sh
@@ -31,7 +31,6 @@ echo -e 'logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(na
echo -e '[heat_api]\nworkers=2\n' >> $localconf
echo -e '[heat_api_cfn]\nworkers=2\n' >> $localconf
-echo -e '[heat_api_cloudwatch]\nworkers=2\n' >> $localconf
echo -e '[cache]\nenabled=True\n' >> $localconf
@@ -45,9 +44,9 @@ echo "[[local|localrc]]" >> $localconf
# to network
if [[ -e /etc/ci/mirror_info.sh ]]; then
source /etc/ci/mirror_info.sh
- echo "IMAGE_URLS+=${NODEPOOL_FEDORA_MIRROR}/releases/26/CloudImages/x86_64/images/Fedora-Cloud-Base-26-1.5.x86_64.qcow2" >> $localconf
+ echo "IMAGE_URLS+=${NODEPOOL_FEDORA_MIRROR}/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2" >> $localconf
else
- echo "IMAGE_URLS+=https://download.fedoraproject.org/pub/fedora/linux/releases/26/CloudImages/x86_64/images/Fedora-Cloud-Base-26-1.5.x86_64.qcow2" >> $localconf
+ echo "IMAGE_URLS+=https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2" >> $localconf
fi
echo "CEILOMETER_PIPELINE_INTERVAL=60" >> $localconf
diff --git a/heat_integrationtests/prepare_test_env.sh b/heat_integrationtests/prepare_test_env.sh
index 055723282..00e56eb2f 100755
--- a/heat_integrationtests/prepare_test_env.sh
+++ b/heat_integrationtests/prepare_test_env.sh
@@ -13,7 +13,8 @@
# under the License.
# This script creates required cloud resources and sets test options
-# in tempest.conf.
+# in heat_integrationtests.conf and in tempest.conf.
+# Credentials are required for creating nova flavors and glance images.
set -e
@@ -23,50 +24,68 @@ source $DEST/devstack/inc/ini-config
set -x
-conf_file=$DEST/tempest/etc/tempest.conf
-
-iniset_multiline $conf_file service_available heat_plugin True
-
-source $DEST/devstack/openrc demo demo
-# user creds
-iniset $conf_file heat_plugin username $OS_USERNAME
-iniset $conf_file heat_plugin password $OS_PASSWORD
-iniset $conf_file heat_plugin project_name $OS_PROJECT_NAME
-iniset $conf_file heat_plugin auth_url $OS_AUTH_URL
-iniset $conf_file heat_plugin user_domain_id $OS_USER_DOMAIN_ID
-iniset $conf_file heat_plugin project_domain_id $OS_PROJECT_DOMAIN_ID
-iniset $conf_file heat_plugin user_domain_name $OS_USER_DOMAIN_NAME
-iniset $conf_file heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME
-iniset $conf_file heat_plugin region $OS_REGION_NAME
-iniset $conf_file heat_plugin auth_version $OS_IDENTITY_API_VERSION
-
-source $DEST/devstack/openrc admin admin
-iniset $conf_file heat_plugin admin_username $OS_USERNAME
-iniset $conf_file heat_plugin admin_password $OS_PASSWORD
-
-
-# Register the flavors for booting test servers
-iniset $conf_file heat_plugin instance_type m1.heat_int
-iniset $conf_file heat_plugin minimal_instance_type m1.heat_micro
-openstack flavor create m1.heat_int --ram 512
-openstack flavor create m1.heat_micro --ram 128
-
-iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-26-1.5.x86_64
-iniset $conf_file heat_plugin boot_config_env $DEST/heat-templates/hot/software-config/boot-config/test_image_env.yaml
-iniset $conf_file heat_plugin heat_config_notify_script $DEST/heat-templates/hot/software-config/elements/heat-config/bin/heat-config-notify
-iniset $conf_file heat_plugin minimal_image_ref cirros-0.3.5-x86_64-disk
-
-# Skip ReloadOnSighupTest. Most jobs now run with apache+uwsgi, so the test has no significance
-# Skip NotificationTest till bug #1721202 is fixed
-iniset $conf_file heat_plugin skip_functional_test_list 'ReloadOnSighupTest, NotificationTest'
-
-# Add scenario tests to skip
-# VolumeBackupRestoreIntegrationTest skipped until failure rate can be reduced ref bug #1382300
-# test_server_signal_userdata_format_software_config is skipped untill bug #1651768 is resolved
-iniset $conf_file heat_plugin skip_scenario_test_list 'SoftwareConfigIntegrationTest, VolumeBackupRestoreIntegrationTest'
-
-if [ "$DISABLE_CONVERGENCE" == "true" ]; then
- iniset $conf_file heat_plugin convergence_engine_enabled false
-fi
-
-cat $conf_file
+function _config_iniset {
+ local conf_file=$1
+
+ source $DEST/devstack/openrc demo demo
+ # user creds
+ iniset $conf_file heat_plugin username $OS_USERNAME
+ iniset $conf_file heat_plugin password $OS_PASSWORD
+ iniset $conf_file heat_plugin project_name $OS_PROJECT_NAME
+ iniset $conf_file heat_plugin auth_url $OS_AUTH_URL
+ iniset $conf_file heat_plugin user_domain_id $OS_USER_DOMAIN_ID
+ iniset $conf_file heat_plugin project_domain_id $OS_PROJECT_DOMAIN_ID
+ iniset $conf_file heat_plugin user_domain_name $OS_USER_DOMAIN_NAME
+ iniset $conf_file heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME
+ iniset $conf_file heat_plugin region $OS_REGION_NAME
+ iniset $conf_file heat_plugin auth_version $OS_IDENTITY_API_VERSION
+
+ source $DEST/devstack/openrc admin admin
+ iniset $conf_file heat_plugin admin_username $OS_USERNAME
+ iniset $conf_file heat_plugin admin_password $OS_PASSWORD
+
+ # Register the flavors for booting test servers
+ iniset $conf_file heat_plugin instance_type m1.heat_int
+ iniset $conf_file heat_plugin minimal_instance_type m1.heat_micro
+
+ iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-27-1.6.x86_64
+ iniset $conf_file heat_plugin minimal_image_ref cirros-0.3.5-x86_64-disk
+
+ if [ "$DISABLE_CONVERGENCE" == "true" ]; then
+ iniset $conf_file heat_plugin convergence_engine_enabled false
+ fi
+}
+
+
+function _config_functionaltests
+{
+ local conf_file=$DEST/heat/heat_integrationtests/heat_integrationtests.conf
+ _config_iniset $conf_file
+
+ # Skip NotificationTest till bug #1721202 is fixed
+ iniset $conf_file heat_plugin skip_functional_test_list 'NotificationTest'
+
+ cat $conf_file
+}
+
+function _config_tempest_plugin
+{
+ local conf_file=$DEST/tempest/etc/tempest.conf
+ iniset_multiline $conf_file service_available heat_plugin True
+ _config_iniset $conf_file
+ iniset $conf_file heat_plugin heat_config_notify_script $DEST/heat-templates/hot/software-config/elements/heat-config/bin/heat-config-notify
+ iniset $conf_file heat_plugin boot_config_env $DEST/heat-templates/hot/software-config/boot-config/test_image_env.yaml
+
+ # Skip VolumeBackupRestoreIntegrationTest until the failure rate can be reduced, ref bug #1382300
+ # Skip test_server_signal_userdata_format_software_config until bug #1651768 is resolved
+ iniset $conf_file heat_plugin skip_scenario_test_list 'SoftwareConfigIntegrationTest, VolumeBackupRestoreIntegrationTest'
+ iniset $conf_file heat_plugin skip_functional_test_list ''
+
+ cat $conf_file
+}
+
+_config_functionaltests
+_config_tempest_plugin
+
+openstack flavor show m1.heat_int || openstack flavor create m1.heat_int --ram 512
+openstack flavor show m1.heat_micro || openstack flavor create m1.heat_micro --ram 128
diff --git a/heat_integrationtests/prepare_test_network.sh b/heat_integrationtests/prepare_test_network.sh
index a69af4ea6..01125a89c 100755
--- a/heat_integrationtests/prepare_test_network.sh
+++ b/heat_integrationtests/prepare_test_network.sh
@@ -20,6 +20,7 @@ HEAT_PRIVATE_SUBNET_CIDR=10.0.5.0/24
# create a heat specific private network (default 'private' network has ipv6 subnet)
source $DEST/devstack/openrc demo demo
-openstack network create heat-net
-openstack subnet create heat-subnet --network heat-net --subnet-range $HEAT_PRIVATE_SUBNET_CIDR
+
+openstack network show heat-net || openstack network create heat-net
+openstack subnet show heat-subnet || openstack subnet create heat-subnet --network heat-net --subnet-range $HEAT_PRIVATE_SUBNET_CIDR
openstack router add subnet router1 heat-subnet
diff --git a/heat_integrationtests/scenario/__init__.py b/heat_integrationtests/scenario/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/heat_integrationtests/scenario/__init__.py
+++ /dev/null
diff --git a/heat_integrationtests/scenario/scenario_base.py b/heat_integrationtests/scenario/scenario_base.py
deleted file mode 100644
index c48d64dc3..000000000
--- a/heat_integrationtests/scenario/scenario_base.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_utils import reflection
-
-from heat_integrationtests.common import test
-
-
-class ScenarioTestsBase(test.HeatIntegrationTest):
- """This class defines common parameters for scenario tests."""
-
- def setUp(self):
- super(ScenarioTestsBase, self).setUp()
- self.check_skip()
- self.sub_dir = 'templates'
- self.assign_keypair()
-
- if not self.conf.fixed_network_name:
- raise self.skipException("No default network configured to test")
- self.net = self._get_network()
-
- if not self.conf.minimal_image_ref:
- raise self.skipException("No minimal image configured to test")
- if not self.conf.minimal_instance_type:
- raise self.skipException("No minimal flavor configured to test")
-
- def launch_stack(self, template_name, expected_status='CREATE_COMPLETE',
- parameters=None, **kwargs):
- template = self._load_template(__file__, template_name, self.sub_dir)
-
- parameters = parameters or {}
-
- if kwargs.get('add_parameters'):
- parameters.update(kwargs['add_parameters'])
-
- stack_id = self.stack_create(
- stack_name=kwargs.get('stack_name'),
- template=template,
- files=kwargs.get('files'),
- parameters=parameters,
- environment=kwargs.get('environment'),
- expected_status=expected_status
- )
-
- return stack_id
-
- def check_skip(self):
- test_cls_name = reflection.get_class_name(self, fully_qualified=False)
- test_method_name = '.'.join([test_cls_name, self._testMethodName])
- test_skipped = (self.conf.skip_scenario_test_list and (
- test_cls_name in self.conf.skip_scenario_test_list or
- test_method_name in self.conf.skip_scenario_test_list))
- if self.conf.skip_scenario_tests or test_skipped:
- self.skipTest('Test disabled in conf, skipping')
diff --git a/heat_integrationtests/scenario/templates/app_server_lbv2_neutron.yaml b/heat_integrationtests/scenario/templates/app_server_lbv2_neutron.yaml
deleted file mode 100644
index f750a9862..000000000
--- a/heat_integrationtests/scenario/templates/app_server_lbv2_neutron.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-heat_template_version: 2015-10-15
-
-description: |
- App server that is a member of Neutron Pool.
-
-parameters:
-
- image:
- type: string
-
- flavor:
- type: string
-
- net:
- type: string
-
- sec_group:
- type: string
-
- pool:
- type: string
-
- app_port:
- type: number
-
- timeout:
- type: number
-
- subnet:
- type: string
-
-resources:
-
- config:
- type: OS::Test::WebAppConfig
- properties:
- app_port: { get_param: app_port }
- wc_curl_cli: { get_attr: [ handle, curl_cli ] }
-
- server:
- type: OS::Nova::Server
- properties:
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - network: { get_param: net }
- security_groups:
- - { get_param: sec_group }
- user_data_format: RAW
- user_data: { get_resource: config }
-
- handle:
- type: OS::Heat::WaitConditionHandle
-
- waiter:
- type: OS::Heat::WaitCondition
- depends_on: server
- properties:
- timeout: { get_param: timeout }
- handle: { get_resource: handle }
-
- pool_member:
- type: OS::Neutron::LBaaS::PoolMember
- depends_on: waiter
- properties:
- address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
- pool: { get_param: pool }
- protocol_port: { get_param: app_port }
- subnet: { get_param: subnet }
diff --git a/heat_integrationtests/scenario/templates/app_server_neutron.yaml b/heat_integrationtests/scenario/templates/app_server_neutron.yaml
deleted file mode 100644
index 9cbf82ab6..000000000
--- a/heat_integrationtests/scenario/templates/app_server_neutron.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-heat_template_version: 2015-10-15
-
-description: |
- App server that is a member of Neutron Pool.
-
-parameters:
-
- image:
- type: string
-
- flavor:
- type: string
-
- net:
- type: string
-
- sec_group:
- type: string
-
- pool_id:
- type: string
-
- app_port:
- type: number
-
- timeout:
- type: number
-
-resources:
-
- config:
- type: OS::Test::WebAppConfig
- properties:
- app_port: { get_param: app_port }
- wc_curl_cli: { get_attr: [ handle, curl_cli ] }
-
- server:
- type: OS::Nova::Server
- properties:
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - network: { get_param: net }
- security_groups:
- - { get_param: sec_group }
- user_data_format: RAW
- user_data: { get_resource: config }
-
- handle:
- type: OS::Heat::WaitConditionHandle
-
- waiter:
- type: OS::Heat::WaitCondition
- depends_on: server
- properties:
- timeout: { get_param: timeout }
- handle: { get_resource: handle }
-
- pool_member:
- type: OS::Neutron::PoolMember
- depends_on: waiter
- properties:
- address: { get_attr: [ server, networks, { get_param: net }, 0 ] }
- pool_id: { get_param: pool_id }
- protocol_port: { get_param: app_port }
diff --git a/heat_integrationtests/scenario/templates/boot_config_none_env.yaml b/heat_integrationtests/scenario/templates/boot_config_none_env.yaml
deleted file mode 100644
index 91d130cd7..000000000
--- a/heat_integrationtests/scenario/templates/boot_config_none_env.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-# Defines a Heat::InstallConfigAgent config resource which performs no config.
-# This environment can be used when the image already has the required agents
-# installed and configured.
-resource_registry:
- "Heat::InstallConfigAgent": "OS::Heat::SoftwareConfig" \ No newline at end of file
diff --git a/heat_integrationtests/scenario/templates/netcat-webapp.yaml b/heat_integrationtests/scenario/templates/netcat-webapp.yaml
deleted file mode 100644
index fdb03359f..000000000
--- a/heat_integrationtests/scenario/templates/netcat-webapp.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-heat_template_version: 2015-10-15
-
-description: |
- Simplest web-app using netcat reporting only hostname.
- Specifically tailored for minimal Cirros image.
-
-parameters:
-
- app_port:
- type: number
-
- wc_curl_cli:
- type: string
-
-resources:
-
- webapp_nc:
- type: OS::Heat::SoftwareConfig
- properties:
- group: ungrouped
- config:
- str_replace:
- template: |
- #! /bin/sh -v
- Body=$(hostname)
- Response="HTTP/1.1 200 OK\r\nContent-Length: ${#Body}\r\n\r\n$Body"
- wc_notify --data-binary '{"status": "SUCCESS"}'
- while true ; do echo -e $Response | nc -llp PORT; done
- params:
- PORT: { get_param: app_port }
- wc_notify: { get_param: wc_curl_cli }
-
-outputs:
- OS::stack_id:
- value: { get_resource: webapp_nc }
diff --git a/heat_integrationtests/scenario/templates/test_aodh_alarm.yaml b/heat_integrationtests/scenario/templates/test_aodh_alarm.yaml
deleted file mode 100644
index 0dc6a275f..000000000
--- a/heat_integrationtests/scenario/templates/test_aodh_alarm.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-heat_template_version: 2013-05-23
-parameters:
- metric_id:
- type: string
-resources:
- asg:
- type: OS::Heat::AutoScalingGroup
- properties:
- max_size: 5
- min_size: 1
- resource:
- type: OS::Heat::RandomString
- scaleup_policy:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: {get_resource: asg}
- cooldown: 0
- scaling_adjustment: 1
- alarm:
- type: OS::Aodh::GnocchiAggregationByMetricsAlarm
- properties:
- metrics:
- - {get_param: metric_id}
- comparison_operator: ge
- evaluation_periods: 1
- granularity: 60
- aggregation_method: mean
- threshold: 10
- alarm_actions:
- - str_replace:
- template: trust+url
- params:
- url: {get_attr: [scaleup_policy, signal_url]}
-outputs:
- asg_size:
- value: {get_attr: [asg, current_size]}
diff --git a/heat_integrationtests/scenario/templates/test_autoscaling_lb_neutron.yaml b/heat_integrationtests/scenario/templates/test_autoscaling_lb_neutron.yaml
deleted file mode 100644
index d47e78761..000000000
--- a/heat_integrationtests/scenario/templates/test_autoscaling_lb_neutron.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: |
- Template which tests Neutron load balancing requests to members of
- Heat AutoScalingGroup.
- Instances must be running some webserver on a given app_port
- producing HTTP response that is different between servers
- but stable over time for given server.
-
-parameters:
- flavor:
- type: string
- image:
- type: string
- net:
- type: string
- subnet:
- type: string
- public_net:
- type: string
- app_port:
- type: number
- default: 8080
- lb_port:
- type: number
- default: 80
- timeout:
- type: number
- default: 600
-
-resources:
-
- sec_group:
- type: OS::Neutron::SecurityGroup
- properties:
- rules:
- - remote_ip_prefix: 0.0.0.0/0
- protocol: tcp
- port_range_min: { get_param: app_port }
- port_range_max: { get_param: app_port }
-
- asg:
- type: OS::Heat::AutoScalingGroup
- properties:
- desired_capacity: 1
- max_size: 2
- min_size: 1
- resource:
- type: OS::Test::NeutronAppServer
- properties:
- image: { get_param: image }
- flavor: { get_param: flavor }
- net: { get_param: net}
- sec_group: { get_resource: sec_group }
- app_port: { get_param: app_port }
- pool_id: { get_resource: pool }
- timeout: { get_param: timeout }
-
- scale_up:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: asg }
- scaling_adjustment: 1
-
- scale_down:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: asg }
- scaling_adjustment: -1
-
- health_monitor:
- type: OS::Neutron::HealthMonitor
- properties:
- delay: 3
- type: HTTP
- timeout: 3
- max_retries: 3
-
- pool:
- type: OS::Neutron::Pool
- properties:
- lb_method: ROUND_ROBIN
- protocol: HTTP
- subnet: { get_param: subnet }
- monitors:
- - { get_resource: health_monitor }
- vip:
- protocol_port: { get_param: lb_port }
-
- floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: public_net }
- port_id:
- { get_attr: [pool, vip, 'port_id'] }
-
- loadbalancer:
- type: OS::Neutron::LoadBalancer
- properties:
- pool_id: { get_resource: pool }
- protocol_port: { get_param: app_port }
-
-outputs:
- lburl:
- description: URL of the loadbalanced app
- value:
- str_replace:
- template: http://IP_ADDRESS:PORT
- params:
- IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
- PORT: { get_param: lb_port }
diff --git a/heat_integrationtests/scenario/templates/test_autoscaling_lbv2_neutron.yaml b/heat_integrationtests/scenario/templates/test_autoscaling_lbv2_neutron.yaml
deleted file mode 100644
index 470236631..000000000
--- a/heat_integrationtests/scenario/templates/test_autoscaling_lbv2_neutron.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: |
- Template which tests Neutron load balancing requests to members of
- Heat AutoScalingGroup. This uses LBaas V2.
- Instances must be running some webserver on a given app_port
- producing HTTP response that is different between servers
- but stable over time for given server.
-
-parameters:
- flavor:
- type: string
- image:
- type: string
- net:
- type: string
- subnet:
- type: string
- public_net:
- type: string
- app_port:
- type: number
- default: 8080
- lb_port:
- type: number
- default: 80
- timeout:
- type: number
- default: 600
-
-resources:
-
- sec_group:
- type: OS::Neutron::SecurityGroup
- properties:
- rules:
- - remote_ip_prefix: 0.0.0.0/0
- protocol: tcp
- port_range_min: { get_param: app_port }
- port_range_max: { get_param: app_port }
-
- asg:
- type: OS::Heat::AutoScalingGroup
- properties:
- desired_capacity: 1
- max_size: 2
- min_size: 1
- resource:
- type: OS::Test::NeutronAppServer
- properties:
- image: { get_param: image }
- flavor: { get_param: flavor }
- net: { get_param: net}
- sec_group: { get_resource: sec_group }
- app_port: { get_param: app_port }
- pool: { get_resource: pool }
- subnet: { get_param: subnet }
- timeout: { get_param: timeout }
-
- scale_up:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: asg }
- scaling_adjustment: 1
-
- scale_down:
- type: OS::Heat::ScalingPolicy
- properties:
- adjustment_type: change_in_capacity
- auto_scaling_group_id: { get_resource: asg }
- scaling_adjustment: -1
-
- health_monitor:
- type: OS::Neutron::LBaaS::HealthMonitor
- properties:
- delay: 3
- type: HTTP
- timeout: 3
- max_retries: 3
- pool: { get_resource: pool }
-
- pool:
- type: OS::Neutron::LBaaS::Pool
- properties:
- lb_algorithm: ROUND_ROBIN
- protocol: HTTP
- listener: { get_resource: listener }
-
- listener:
- type: OS::Neutron::LBaaS::Listener
- properties:
- loadbalancer: { get_resource: loadbalancer }
- protocol: HTTP
- protocol_port: { get_param: lb_port }
-
- loadbalancer:
- type: OS::Neutron::LBaaS::LoadBalancer
- properties:
- vip_subnet: { get_param: subnet }
-
- floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: public_net }
- port_id: { get_attr: [loadbalancer, vip_port_id] }
-
-outputs:
- lburl:
- description: URL of the loadbalanced app
- value:
- str_replace:
- template: http://IP_ADDRESS:PORT
- params:
- IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] }
- PORT: { get_param: lb_port }
diff --git a/heat_integrationtests/scenario/templates/test_base_resources.yaml b/heat_integrationtests/scenario/templates/test_base_resources.yaml
deleted file mode 100644
index bff618550..000000000
--- a/heat_integrationtests/scenario/templates/test_base_resources.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
- This HOT template that just defines a single server.
- Contains just base features to verify base heat support.
-
-parameters:
- key_name:
- type: string
- default: key-01
- description: Name of an existing key pair to use for the server
- flavor:
- type: string
- description: Flavor for the server to be created
- default: m1.small
- constraints:
- - custom_constraint: nova.flavor
- image:
- type: string
- description: Image ID or image name to use for the server
- constraints:
- - custom_constraint: glance.image
- vol_size:
- type: number
- description: The size of the Cinder volume
- default: 1
- private_net_name:
- type: string
- default: private-net-01
- description: Name of private network to be created
- private_net_cidr:
- type: string
- default: 192.168.101.0/24
- description: Private network address (CIDR notation)
- private_net_gateway:
- type: string
- default: 192.168.101.1
- description: Private network gateway address
- private_net_pool_start:
- type: string
- default: 192.168.101.2
- description: Start of private network IP address allocation pool
- private_net_pool_end:
- type: string
- default: 192.168.101.127
- description: End of private network IP address allocation pool
- echo_foo:
- default: fooooo
- type: string
-
-resources:
- private_net:
- type: OS::Neutron::Net
- properties:
- name: { get_param: private_net_name }
-
- private_subnet:
- type: OS::Neutron::Subnet
- properties:
- network_id: { get_resource: private_net }
- cidr: { get_param: private_net_cidr }
- gateway_ip: { get_param: private_net_gateway }
- allocation_pools:
- - start: { get_param: private_net_pool_start }
- end: { get_param: private_net_pool_end }
-
- server_port:
- type: OS::Neutron::Port
- properties:
- network_id: { get_resource: private_net }
- fixed_ips:
- - subnet_id: { get_resource: private_subnet }
-
- key:
- type: OS::Nova::KeyPair
- properties:
- name: { get_param: key_name }
-
- server:
- type: OS::Nova::Server
- properties:
- key_name: { get_resource: key }
- image: { get_param: image }
- flavor: { get_param: flavor }
- networks:
- - port: { get_resource: server_port }
- user_data:
- str_replace:
- template: |
- #!/bin/bash
- echo echo_foo
- params:
- echo_foo: { get_param: echo_foo }
-
- vol:
- type: OS::Cinder::Volume
- properties:
- size: { get_param: vol_size }
-
- vol_att:
- type: OS::Cinder::VolumeAttachment
- properties:
- instance_uuid: { get_resource: server }
- volume_id: { get_resource: vol }
- mountpoint: /dev/vdb
-
-outputs:
- server_networks:
- description: The networks of the deployed server
- value: { get_attr: [server, networks] }
diff --git a/heat_integrationtests/scenario/templates/test_server_cfn_init.yaml b/heat_integrationtests/scenario/templates/test_server_cfn_init.yaml
deleted file mode 100644
index 9f947170a..000000000
--- a/heat_integrationtests/scenario/templates/test_server_cfn_init.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
- Template which uses a wait condition to confirm that a minimal
- cfn-init and cfn-signal has worked
-Parameters:
- key_name:
- Type: String
- flavor:
- Type: String
- image:
- Type: String
- subnet:
- Type: String
- timeout:
- Type: Number
-Resources:
- CfnUser:
- Type: AWS::IAM::User
- SmokeSecurityGroup:
- Type: AWS::EC2::SecurityGroup
- Properties:
- GroupDescription: Enable only ping and SSH access
- SecurityGroupIngress:
- - {CidrIp: 0.0.0.0/0, FromPort: '-1', IpProtocol: icmp, ToPort: '-1'}
- - {CidrIp: 0.0.0.0/0, FromPort: '22', IpProtocol: tcp, ToPort: '22'}
- SmokeKeys:
- Type: AWS::IAM::AccessKey
- Properties:
- UserName: {Ref: CfnUser}
-
- ElasticIp:
- Type: AWS::EC2::EIP
- Properties:
- Domain: vpc
-
- SmokeServerElasticIp:
- Type: AWS::EC2::EIPAssociation
- Properties:
- EIP: {Ref: ElasticIp}
- InstanceId: {Ref: SmokeServer}
-
- SmokeServer:
- Type: AWS::EC2::Instance
- Metadata:
- AWS::CloudFormation::Init:
- config:
- files:
- /tmp/smoke-status:
- content: smoke test complete
- /etc/cfn/cfn-credentials:
- content:
- Fn::Replace:
- - SmokeKeys: {Ref: SmokeKeys}
- SecretAccessKey:
- 'Fn::GetAtt': [SmokeKeys, SecretAccessKey]
- - |
- AWSAccessKeyId=SmokeKeys
- AWSSecretKey=SecretAccessKey
- mode: '000400'
- owner: root
- group: root
- Properties:
- ImageId: {Ref: image}
- InstanceType: {Ref: flavor}
- KeyName: {Ref: key_name}
- SubnetId: {Ref: subnet}
- SecurityGroups:
- - {Ref: SmokeSecurityGroup}
- UserData:
- Fn::Replace:
- - WaitHandle: {Ref: WaitHandle}
- - |
- #!/bin/bash -v
- /opt/aws/bin/cfn-init
- /opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
- --id smoke_status "WaitHandle"
- WaitHandle:
- Type: AWS::CloudFormation::WaitConditionHandle
- WaitCondition:
- Type: AWS::CloudFormation::WaitCondition
- DependsOn: SmokeServer
- Properties:
- Handle: {Ref: WaitHandle}
- Timeout: {Ref: timeout}
-Outputs:
- WaitConditionStatus:
- Description: Contents of /tmp/smoke-status on SmokeServer
- Value:
- Fn::GetAtt: [WaitCondition, Data]
- ElasticIp_Id:
- Description: Elastic ip allocation id
- Value:
- Fn::GetAtt: [ElasticIp, AllocationId]
- SmokeServerElasticIp:
- Description: Elastic ip address of server
- Value:
- Ref: ElasticIp
diff --git a/heat_integrationtests/scenario/templates/test_server_signal.yaml b/heat_integrationtests/scenario/templates/test_server_signal.yaml
deleted file mode 100644
index 4466a5ea1..000000000
--- a/heat_integrationtests/scenario/templates/test_server_signal.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-heat_template_version: 2013-05-23
-description: |
- Template which uses a wait condition to confirm that a minimal
- signalling works in a created network
-parameters:
- key_name:
- type: string
- flavor:
- type: string
- image:
- type: string
- subnet_cidr:
- type: string
- default: 10.100.0.0/16
- timeout:
- type: number
- public_net:
- type: string
- default: public
- private_net:
- type: string
- default: heat-net
- dns_servers:
- type: comma_delimited_list
- default: ["8.8.8.8", "8.8.4.4"]
- user_data_format:
- type: string
- default: RAW
-resources:
- sg:
- type: OS::Neutron::SecurityGroup
- properties:
- name: the_sg
- description: Ping and SSH
- rules:
- - protocol: icmp
- - protocol: tcp
- port_range_min: 22
- port_range_max: 22
-
- floating_ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: {get_param: public_net}
-
- network:
- type: OS::Neutron::Net
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- network: {get_resource: network}
- ip_version: 4
- cidr: {get_param: subnet_cidr}
- dns_nameservers: {get_param: dns_servers}
-
- router:
- type: OS::Neutron::Router
- properties:
- external_gateway_info:
- network: {get_param: public_net}
-
- router_interface:
- type: OS::Neutron::RouterInterface
- properties:
- router: {get_resource: router}
- subnet: {get_resource: subnet}
-
- wait_handle:
- type: OS::Heat::WaitConditionHandle
-
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- key_name: {get_param: key_name}
- networks:
- - subnet: {get_resource: subnet}
- security_groups:
- - {get_resource: sg}
- user_data_format: {get_param: user_data_format}
- user_data:
- str_replace:
- template: |
- #!/bin/sh
- wc_notify --data-binary '{"status": "SUCCESS", "data": "test complete"}'
- params:
- wc_notify: { get_attr: ['wait_handle', 'curl_cli'] }
-
- server_floating_ip_assoc:
- type: OS::Neutron::FloatingIPAssociation
- properties:
- floatingip_id: {get_resource: floating_ip}
- port_id: {get_attr: [server, addresses, {get_resource: network}, 0, port]}
-
- wait_condition:
- type: OS::Heat::WaitCondition
- properties:
- handle: {get_resource: wait_handle}
- timeout: {get_param: timeout}
-
-outputs:
- server_ip:
- value: {get_attr: [floating_ip, floating_ip_address]}
- wc_data:
- value: {get_attr: [wait_condition, data]}
diff --git a/heat_integrationtests/scenario/templates/test_server_software_config.yaml b/heat_integrationtests/scenario/templates/test_server_software_config.yaml
deleted file mode 100644
index bf8fa9bbc..000000000
--- a/heat_integrationtests/scenario/templates/test_server_software_config.yaml
+++ /dev/null
@@ -1,173 +0,0 @@
-heat_template_version: 2014-10-16
-parameters:
- key_name:
- type: string
- flavor:
- type: string
- image:
- type: string
- network:
- type: string
- signal_transport:
- type: string
- default: CFN_SIGNAL
- software_config_transport:
- type: string
- default: POLL_SERVER_CFN
- dep1_foo:
- default: fooooo
- type: string
- dep1_bar:
- default: baaaaa
- type: string
- dep2a_bar:
- type: string
- default: barrr
- dep3_foo:
- default: fo
- type: string
- dep3_bar:
- default: ba
- type: string
-
-resources:
-
- the_sg:
- type: OS::Neutron::SecurityGroup
- properties:
- name: the_sg
- description: Ping and SSH
- rules:
- - protocol: icmp
- - protocol: tcp
- port_range_min: 22
- port_range_max: 22
-
- cfg1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: foo
- - name: bar
- outputs:
- - name: result
- config: {get_file: cfg1.sh}
-
- cfg2a:
- type: OS::Heat::StructuredConfig
- properties:
- group: cfn-init
- inputs:
- - name: bar
- config:
- config:
- files:
- /tmp/cfn-init-foo:
- content:
- get_input: bar
- mode: '000644'
-
- cfg2b:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- outputs:
- - name: result
- config: |
- #!/bin/sh
- echo -n "The file /tmp/cfn-init-foo contains `cat /tmp/cfn-init-foo` for server $deploy_server_id during $deploy_action" > $heat_outputs_path.result
-
- cfg3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- inputs:
- - name: foo
- - name: bar
- outputs:
- - name: result
- config: {get_file: cfg3.pp}
-
- dep1:
- type: OS::Heat::SoftwareDeployment
- properties:
- config:
- get_resource: cfg1
- server:
- get_resource: server
- input_values:
- foo: {get_param: dep1_foo}
- bar: {get_param: dep1_bar}
- signal_transport: {get_param: signal_transport}
-
- dep2a:
- type: OS::Heat::StructuredDeployment
- properties:
- name: 10_dep2a
- signal_transport: NO_SIGNAL
- config:
- get_resource: cfg2a
- server:
- get_resource: server
- input_values:
- bar: {get_param: dep2a_bar}
-
- dep2b:
- type: OS::Heat::SoftwareDeployment
- properties:
- name: 20_dep2b
- config:
- get_resource: cfg2b
- server:
- get_resource: server
- signal_transport: {get_param: signal_transport}
-
- dep3:
- type: OS::Heat::SoftwareDeployment
- properties:
- config:
- get_resource: cfg3
- server:
- get_resource: server
- input_values:
- foo: {get_param: dep3_foo}
- bar: {get_param: dep3_bar}
- signal_transport: {get_param: signal_transport}
-
- cfg_user_data:
- type: Heat::InstallConfigAgent
-
- server:
- type: OS::Nova::Server
- properties:
- image: {get_param: image}
- flavor: {get_param: flavor}
- key_name: {get_param: key_name}
- security_groups:
- - {get_resource: the_sg}
- networks:
- - network: {get_param: network}
- user_data_format: SOFTWARE_CONFIG
- software_config_transport: {get_param: software_config_transport}
- user_data: {get_attr: [cfg_user_data, config]}
-
-outputs:
- res1:
- value:
- result: {get_attr: [dep1, result]}
- stdout: {get_attr: [dep1, deploy_stdout]}
- stderr: {get_attr: [dep1, deploy_stderr]}
- status_code: {get_attr: [dep1, deploy_status_code]}
- res2:
- value:
- result: {get_attr: [dep2b, result]}
- stdout: {get_attr: [dep2b, deploy_stdout]}
- stderr: {get_attr: [dep2b, deploy_stderr]}
- status_code: {get_attr: [dep2b, deploy_status_code]}
- res3:
- value:
- result: {get_attr: [dep3, result]}
- stdout: {get_attr: [dep3, deploy_stdout]}
- stderr: {get_attr: [dep3, deploy_stderr]}
- status_code: {get_attr: [dep3, deploy_status_code]}
diff --git a/heat_integrationtests/scenario/templates/test_volumes_create_from_backup.yaml b/heat_integrationtests/scenario/templates/test_volumes_create_from_backup.yaml
deleted file mode 100644
index ab1edf88a..000000000
--- a/heat_integrationtests/scenario/templates/test_volumes_create_from_backup.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-heat_template_version: 2013-05-23
-
-parameters:
- key_name:
- type: string
- description: keypair to enable SSH access to the instance.
-
- instance_type:
- type: string
- description: Type of the instance to be created.
-
- image_id:
- type: string
- description: ID of the image to use for the instance to be created.
-
- timeout:
- type: number
- description: Stack creation timeout
-
- dev_name:
- type: string
- description: Expected device name for volume
- default: vdb
-
- rescan_timeout:
- type: number
- description: Max number of seconds to wait for volume after rescan
- default: 120
-
- backup_id:
- type: string
- description: backup_id to create volume from
-
- network:
- type: string
-
- volume_description:
- type: string
- description: Description of volume
- default: A volume description
-
-resources:
- volume:
- type: OS::Cinder::Volume
- properties:
- backup_id: { get_param: backup_id }
- description: { get_param: volume_description }
-
- volume_attachment:
- type: OS::Cinder::VolumeAttachment
- properties:
- volume_id: { get_resource: volume }
- instance_uuid: { get_resource: instance }
-
- instance:
- type: OS::Nova::Server
- properties:
- image: { get_param: image_id }
- flavor: { get_param: instance_type }
- key_name: { get_param: key_name }
- networks:
- - uuid: {get_param: network}
- user_data_format: RAW
- user_data:
- str_replace:
- template: |
- #!/bin/sh
- # Trigger rescan to ensure we see the attached volume
- for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
- # Wait for the rescan as the volume doesn't appear immediately
- for i in $(seq 1 rescan_timeout)
- do
- grep -q dev_name /proc/partitions && break
- sleep 1
- done
- if grep -q dev_name /proc/partitions
- then
- mount /dev/dev_name /mnt
- TESTDATA=$(cat /mnt/testfile)
- curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Volume Data:'$TESTDATA'", "UniqueId": "instance1"}' "wc_url"
- else
- curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
- fi
- params:
- wc_url: { get_resource: wait_handle }
- dev_name: { get_param: dev_name }
- rescan_timeout: { get_param: rescan_timeout }
-
- wait_handle:
- type: OS::Heat::UpdateWaitConditionHandle
-
- wait_condition:
- type: AWS::CloudFormation::WaitCondition
- properties:
- Count: 1
- Handle: { get_resource: wait_handle }
- Timeout: { get_param: timeout }
-
-
-outputs:
- status:
- description: status
- value: { get_attr: ['volume', 'status'] }
-
- size:
- description: size
- value: { get_attr: ['volume', 'size'] }
-
- display_description:
- description: display_description
- value: { get_attr: ['volume', 'display_description'] }
-
- volume_id:
- value: { get_resource: volume }
-
- testfile_data:
- description: Contents of /mnt/testfile from the mounted volume
- value: { get_attr: ['wait_condition', 'Data'] }
diff --git a/heat_integrationtests/scenario/templates/test_volumes_delete_snapshot.yaml b/heat_integrationtests/scenario/templates/test_volumes_delete_snapshot.yaml
deleted file mode 100644
index 3893b526c..000000000
--- a/heat_integrationtests/scenario/templates/test_volumes_delete_snapshot.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-heat_template_version: 2013-05-23
-
-parameters:
- key_name:
- type: string
- description: keypair to enable SSH access to the instance.
-
- instance_type:
- type: string
- description: Type of the instance to be created.
-
- image_id:
- type: string
- description: ID of the image to use for the instance to be created.
-
- timeout:
- type: number
- description: Stack creation timeout
-
- dev_name:
- type: string
- description: Expected device name for volume
- default: vdb
-
- test_string:
- type: string
- description: Test string which is written to volume
- default: ateststring
-
- rescan_timeout:
- type: number
- description: Max number of seconds to wait for volume after rescan
- default: 120
-
- network:
- type: string
-
- volume_description:
- type: string
- description: Description of volume
- default: A volume description
-
- volume_size:
- type: number
- description: Size of volume
- default: 1
-
-resources:
- volume:
- deletion_policy: 'Snapshot'
- type: OS::Cinder::Volume
- properties:
- size: {get_param: volume_size}
- description: {get_param: volume_description}
-
- volume_attachment:
- type: OS::Cinder::VolumeAttachment
- properties:
- volume_id: { get_resource: volume }
- instance_uuid: { get_resource: instance }
-
- instance:
- type: OS::Nova::Server
- properties:
- image: { get_param: image_id }
- flavor: { get_param: instance_type }
- key_name: { get_param: key_name }
- networks:
- - uuid: {get_param: network}
- user_data_format: RAW
- user_data:
- str_replace:
- template: |
- #!/bin/sh
- # Trigger rescan to ensure we see the attached volume
- for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
- # Wait for the rescan as the volume doesn't appear immediately
- for i in $(seq 1 rescan_timeout)
- do
- grep -q dev_name /proc/partitions && break
- sleep 1
- done
- if grep -q dev_name /proc/partitions
- then
- mkfs.ext4 /dev/dev_name
- mount /dev/dev_name /mnt
- echo "test_string" > /mnt/testfile
- umount /mnt
- curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Completed volume configuration.", "UniqueId": "instance1"}' "wc_url"
- else
- curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
- fi
- params:
- wc_url: { get_resource: wait_handle }
- dev_name: { get_param: dev_name }
- rescan_timeout: { get_param: rescan_timeout }
- test_string: { get_param: test_string }
-
- wait_handle:
- type: OS::Heat::UpdateWaitConditionHandle
-
- wait_condition:
- type: AWS::CloudFormation::WaitCondition
- properties:
- Count: 1
- Handle: { get_resource: wait_handle }
- Timeout: { get_param: timeout }
-
-
-outputs:
- status:
- description: status
- value: { get_attr: ['volume', 'status'] }
-
- size:
- description: size
- value: { get_attr: ['volume', 'size'] }
-
- display_description:
- description: display_description
- value: { get_attr: ['volume', 'display_description'] }
-
- volume_id:
- value: { get_resource: volume }
diff --git a/heat_integrationtests/scenario/test_aodh_alarm.py b/heat_integrationtests/scenario/test_aodh_alarm.py
deleted file mode 100644
index a9b3d1126..000000000
--- a/heat_integrationtests/scenario/test_aodh_alarm.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-from heat.common import timeutils
-from oslo_log import log as logging
-
-from heat_integrationtests.common import test
-from heat_integrationtests.scenario import scenario_base
-
-LOG = logging.getLogger(__name__)
-
-
-class AodhAlarmTest(scenario_base.ScenarioTestsBase):
- """Class is responsible for testing of aodh usage."""
- def setUp(self):
- super(AodhAlarmTest, self).setUp()
- self.template = self._load_template(__file__,
- 'test_aodh_alarm.yaml',
- 'templates')
-
- def check_instance_count(self, stack_identifier, expected):
- stack = self.client.stacks.get(stack_identifier)
- actual = self._stack_output(stack, 'asg_size')
- if actual != expected:
- LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
- actual))
- return actual == expected
-
- def test_alarm(self):
- """Confirm we can create an alarm and trigger it."""
- # create metric
- metric = self.metric_client.metric.create({
- 'name': 'my_metric',
- 'archive_policy_name': 'high',
- })
-
- # create the stack
- parameters = {'metric_id': metric['id']}
- stack_identifier = self.stack_create(template=self.template,
- parameters=parameters)
- measures = [{'timestamp': timeutils.isotime(datetime.datetime.now()),
- 'value': 100}, {'timestamp': timeutils.isotime(
- datetime.datetime.now() + datetime.timedelta(
- minutes=1)), 'value': 100}]
- # send measures(should cause the alarm to fire)
- self.metric_client.metric.add_measures(metric['id'], measures)
-
- # confirm we get a scaleup.
- # Note: there is little point waiting more than 60s+time to scale up.
- self.assertTrue(test.call_until_true(
- 120, 2, self.check_instance_count, stack_identifier, 2))
-
- # cleanup metric
- self.metric_client.metric.delete(metric['id'])
diff --git a/heat_integrationtests/scenario/test_autoscaling_lb.py b/heat_integrationtests/scenario/test_autoscaling_lb.py
deleted file mode 100644
index 5e8ad272d..000000000
--- a/heat_integrationtests/scenario/test_autoscaling_lb.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-import requests
-
-from heat_integrationtests.common import test
-from heat_integrationtests.scenario import scenario_base
-
-
-class AutoscalingLoadBalancerTest(scenario_base.ScenarioTestsBase):
- """The class is responsible for testing ASG + LBv1 scenario.
-
- The very common use case tested is an autoscaling group
- of some web application servers behind a loadbalancer.
- """
-
- def setUp(self):
- super(AutoscalingLoadBalancerTest, self).setUp()
- self.template_name = 'test_autoscaling_lb_neutron.yaml'
- self.app_server_template_name = 'app_server_neutron.yaml'
- self.webapp_template_name = 'netcat-webapp.yaml'
- if not self.is_network_extension_supported('lbaas'):
- self.skipTest('LBaaS v1 extension not available, skipping')
-
- def check_num_responses(self, url, expected_num, retries=10):
- resp = set()
- for count in range(retries):
- time.sleep(1)
- try:
- r = requests.get(url, verify=self.verify_cert)
- except requests.exceptions.ConnectionError:
- # The LB may not be up yet, let's retry
- continue
- # skip unsuccessful requests
- if r.status_code == 200:
- resp.add(r.text)
- self.assertEqual(expected_num, len(resp))
-
- def test_autoscaling_loadbalancer_neutron(self):
- """Check work of AutoScaing and Neutron LBaaS v1 resource in Heat.
-
- The scenario is the following:
- 1. Launch a stack with a load balancer and autoscaling group
- of one server, wait until stack create is complete.
- 2. Check that there is only one distinctive response from
- loadbalanced IP.
- 3. Signal the scale_up policy, wait until all resources in
- autoscaling group are complete.
- 4. Check that now there are two distinctive responses from
- loadbalanced IP.
- """
-
- parameters = {
- 'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref,
- 'net': self.conf.fixed_network_name,
- 'subnet': self.conf.fixed_subnet_name,
- 'public_net': self.conf.floating_network_name,
- 'app_port': 8080,
- 'lb_port': 80,
- 'timeout': 600
- }
-
- app_server_template = self._load_template(
- __file__, self.app_server_template_name, self.sub_dir
- )
- webapp_template = self._load_template(
- __file__, self.webapp_template_name, self.sub_dir
- )
- files = {'appserver.yaml': app_server_template,
- 'webapp.yaml': webapp_template}
- env = {'resource_registry':
- {'OS::Test::NeutronAppServer': 'appserver.yaml',
- 'OS::Test::WebAppConfig': 'webapp.yaml'}}
- # Launch stack
- sid = self.launch_stack(
- template_name=self.template_name,
- parameters=parameters,
- files=files,
- environment=env
- )
- stack = self.client.stacks.get(sid)
- lb_url = self._stack_output(stack, 'lburl')
- # Check number of distinctive responses, must be 1
- self.check_num_responses(lb_url, 1)
-
- # Signal the scaling hook
- self.client.resources.signal(sid, 'scale_up')
-
- # Wait for AutoScalingGroup update to finish
- asg = self.client.resources.get(sid, 'asg')
- test.call_until_true(self.conf.build_timeout,
- self.conf.build_interval,
- self.check_autoscale_complete,
- asg.physical_resource_id, 2, sid, 'scale_up')
-
- # Check number of distinctive responses, must now be 2
- self.check_num_responses(lb_url, 2)
diff --git a/heat_integrationtests/scenario/test_autoscaling_lbv2.py b/heat_integrationtests/scenario/test_autoscaling_lbv2.py
deleted file mode 100644
index 4ddc21ea3..000000000
--- a/heat_integrationtests/scenario/test_autoscaling_lbv2.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-import requests
-
-from heat_integrationtests.common import test
-from heat_integrationtests.scenario import scenario_base
-
-
-class AutoscalingLoadBalancerv2Test(scenario_base.ScenarioTestsBase):
- """The class is responsible for testing ASG + LBv2 scenario.
-
- The very common use case tested is an autoscaling group
- of some web application servers behind a loadbalancer.
- """
-
- def setUp(self):
- super(AutoscalingLoadBalancerv2Test, self).setUp()
- self.template_name = 'test_autoscaling_lbv2_neutron.yaml'
- self.app_server_template_name = 'app_server_lbv2_neutron.yaml'
- self.webapp_template_name = 'netcat-webapp.yaml'
- if not self.is_network_extension_supported('lbaasv2'):
- self.skipTest('LBaaS v2 extension not available, skipping')
-
- def check_num_responses(self, url, expected_num, retries=20):
- resp = set()
- for count in range(retries):
- time.sleep(2)
- try:
- r = requests.get(url, verify=self.verify_cert)
- except requests.exceptions.ConnectionError:
- # The LB may not be up yet, let's retry
- continue
- # skip unsuccessful requests
- if r.status_code == 200:
- resp.add(r.text)
- if len(resp) == expected_num:
- break
- self.assertEqual(expected_num, len(resp))
-
- def test_autoscaling_loadbalancer_neutron(self):
- """Check work of AutoScaing and Neutron LBaaS v2 resource in Heat.
-
- The scenario is the following:
- 1. Launch a stack with a load balancer and autoscaling group
- of one server, wait until stack create is complete.
- 2. Check that there is only one distinctive response from
- loadbalanced IP.
- 3. Signal the scale_up policy, wait until all resources in
- autoscaling group are complete.
- 4. Check that now there are two distinctive responses from
- loadbalanced IP.
- """
-
- parameters = {
- 'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref,
- 'net': self.conf.fixed_network_name,
- 'subnet': self.conf.fixed_subnet_name,
- 'public_net': self.conf.floating_network_name
- }
-
- app_server_template = self._load_template(
- __file__, self.app_server_template_name, self.sub_dir
- )
- webapp_template = self._load_template(
- __file__, self.webapp_template_name, self.sub_dir
- )
- files = {'appserver.yaml': app_server_template,
- 'webapp.yaml': webapp_template}
- env = {'resource_registry':
- {'OS::Test::NeutronAppServer': 'appserver.yaml',
- 'OS::Test::WebAppConfig': 'webapp.yaml'}}
-
- # Launch stack
- sid = self.launch_stack(
- template_name=self.template_name,
- parameters=parameters,
- files=files,
- environment=env
- )
- stack = self.client.stacks.get(sid)
- lb_url = self._stack_output(stack, 'lburl')
- # Check number of distinctive responses, must be 1
- self.check_num_responses(lb_url, 1)
-
- # Signal the scaling hook
- self.client.resources.signal(sid, 'scale_up')
-
- # Wait for AutoScalingGroup update to finish
- asg = self.client.resources.get(sid, 'asg')
- test.call_until_true(self.conf.build_timeout,
- self.conf.build_interval,
- self.check_autoscale_complete,
- asg.physical_resource_id, 2, sid, 'scale_up')
-
- # Check number of distinctive responses, must now be 2
- self.check_num_responses(lb_url, 2)
diff --git a/heat_integrationtests/scenario/test_base_resources.py b/heat_integrationtests/scenario/test_base_resources.py
deleted file mode 100644
index 80194a0b6..000000000
--- a/heat_integrationtests/scenario/test_base_resources.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heat_integrationtests.common import test
-from heat_integrationtests.scenario import scenario_base
-from heatclient.common import template_utils
-
-
-class BasicResourcesTest(scenario_base.ScenarioTestsBase):
-
- def setUp(self):
- super(BasicResourcesTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- if not self.conf.instance_type:
- raise self.skipException("No flavor configured to test")
-
- def check_stack(self):
- sid = self.stack_identifier
- # Check that stack were created
- self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
- server_resource = self.client.resources.get(sid, 'server')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
- self.assertEqual(server.id, server_id)
-
- stack = self.client.stacks.get(sid)
-
- server_networks = self._stack_output(stack, 'server_networks')
- self.assertIn(self.private_net_name, server_networks)
-
- def test_base_resources_integration(self):
- """Define test for base resources interation from core porjects
-
- The alternative scenario is the following:
- 1. Create a stack with basic resources from core projects.
- 2. Check that all stack resources are created successfully.
- 3. Wait for deployment.
- 4. Check that stack was created.
- 5. Check stack outputs.
- """
-
- self.private_net_name = test.rand_name('heat-net')
- parameters = {
- 'key_name': test.rand_name('heat-key'),
- 'flavor': self.conf.instance_type,
- 'image': self.conf.image_ref,
- 'vol_size': self.conf.volume_size,
- 'private_net_name': self.private_net_name
- }
-
- env_files, env = template_utils.process_environment_and_files(
- self.conf.boot_config_env)
-
- # Launch stack
- self.stack_identifier = self.launch_stack(
- template_name='test_base_resources.yaml',
- parameters=parameters,
- expected_status=None,
- environment=env
- )
-
- # Check stack
- self.check_stack()
diff --git a/heat_integrationtests/scenario/test_server_cfn_init.py b/heat_integrationtests/scenario/test_server_cfn_init.py
deleted file mode 100644
index c7d84e3bf..000000000
--- a/heat_integrationtests/scenario/test_server_cfn_init.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from heat_integrationtests.common import exceptions
-from heat_integrationtests.scenario import scenario_base
-
-
-class CfnInitIntegrationTest(scenario_base.ScenarioTestsBase):
- """Testing cfn-init and cfn-signal workability."""
-
- def setUp(self):
- super(CfnInitIntegrationTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- if not self.conf.instance_type:
- raise self.skipException("No flavor configured to test")
-
- def check_stack(self, sid):
- # Check status of all resources
- for res in ('WaitHandle', 'SmokeSecurityGroup', 'SmokeKeys',
- 'CfnUser', 'SmokeServer', 'SmokeServerElasticIp'):
- self._wait_for_resource_status(
- sid, res, 'CREATE_COMPLETE')
-
- server_resource = self.client.resources.get(sid, 'SmokeServer')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
-
- try:
- self._wait_for_resource_status(
- sid, 'WaitCondition', 'CREATE_COMPLETE')
- finally:
- # attempt to log the server console regardless of WaitCondition
- # going to complete. This allows successful and failed cloud-init
- # logs to be compared
- self._log_console_output(servers=[server])
-
- stack = self.client.stacks.get(sid)
-
- # This is an assert of great significance, as it means the following
- # has happened:
- # - cfn-init read the provided metadata and wrote out a file
- # - a user was created and credentials written to the server
- # - a cfn-signal was built which was signed with provided credentials
- # - the wait condition was fulfilled and the stack has changed state
- wait_status = json.loads(
- self._stack_output(stack, 'WaitConditionStatus'))
- self.assertEqual('smoke test complete', wait_status['smoke_status'])
-
- # Check EIP attributes.
- server_floatingip_id = self._stack_output(stack,
- 'ElasticIp_Id')
- self.assertIsNotNone(server_floatingip_id)
-
- # Fetch EIP details.
- net_show = self.network_client.show_floatingip(
- floatingip=server_floatingip_id)
- floating_ip = net_show['floatingip']['floating_ip_address']
- port_id = net_show['floatingip']['port_id']
-
- # Ensure that EIP was assigned to server.
- port_show = self.network_client.show_port(port=port_id)
- self.assertEqual(server.id, port_show['port']['device_id'])
- server_ip = self._stack_output(stack, 'SmokeServerElasticIp')
- self.assertEqual(server_ip, floating_ip)
-
- # Check that created server is reachable
- if not self._ping_ip_address(server_ip):
- self._log_console_output(servers=[server])
- self.fail(
- "Timed out waiting for %s to become reachable" % server_ip)
-
- # Check that the user can authenticate with the generated keypair
- if self.keypair:
- try:
- linux_client = self.get_remote_client(
- server_ip, username='ec2-user')
- linux_client.validate_authentication()
- except (exceptions.ServerUnreachable,
- exceptions.SSHTimeout):
- self._log_console_output(servers=[server])
- raise
-
- def test_server_cfn_init(self):
- """Check cfn-init and cfn-signal availability on the created server.
-
- The alternative scenario is the following:
- 1. Create a stack with a server and configured security group.
- 2. Check that all stack resources were created.
- 3. Check that created server is reachable.
- 4. Check that stack was created successfully.
- 5. Check that it is possible to connect to the server
- via generated keypair.
- """
- parameters = {
- 'key_name': self.keypair_name,
- 'flavor': self.conf.instance_type,
- 'image': self.conf.image_ref,
- 'timeout': self.conf.build_timeout,
- 'subnet': self.net['subnets'][0],
- }
-
- # Launch stack
- stack_id = self.launch_stack(
- template_name="test_server_cfn_init.yaml",
- parameters=parameters,
- expected_status=None
- )
-
- # Check stack
- self.check_stack(stack_id)
diff --git a/heat_integrationtests/scenario/test_server_signal.py b/heat_integrationtests/scenario/test_server_signal.py
deleted file mode 100644
index b2085e2de..000000000
--- a/heat_integrationtests/scenario/test_server_signal.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from heat_integrationtests.common import exceptions
-from heat_integrationtests.scenario import scenario_base
-
-
-class ServerSignalIntegrationTest(scenario_base.ScenarioTestsBase):
- """Test a server in a created network can signal to heat."""
-
- def _test_server_signal(self, user_data_format='RAW',
- image=None):
- """Check a server in a created network can signal to heat."""
- parameters = {
- 'key_name': self.keypair_name,
- 'flavor': self.conf.minimal_instance_type,
- 'image': image,
- 'timeout': self.conf.build_timeout,
- 'user_data_format': user_data_format
- }
-
- # Launch stack
- sid = self.launch_stack(
- template_name="test_server_signal.yaml",
- parameters=parameters,
- expected_status=None
- )
-
- # Check status of all resources
- for res in ('sg', 'floating_ip', 'network', 'router', 'subnet',
- 'router_interface', 'wait_handle', 'server',
- 'server_floating_ip_assoc'):
- self._wait_for_resource_status(
- sid, res, 'CREATE_COMPLETE')
-
- server_resource = self.client.resources.get(sid, 'server')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
-
- try:
- self._wait_for_resource_status(
- sid, 'wait_condition', 'CREATE_COMPLETE')
- except (exceptions.StackResourceBuildErrorException,
- exceptions.TimeoutException):
- raise
- finally:
- # attempt to log the server console regardless of WaitCondition
- # going to complete. This allows successful and failed cloud-init
- # logs to be compared
- self._log_console_output(servers=[server])
-
- stack = self.client.stacks.get(sid)
-
- wc_data = json.loads(
- self._stack_output(stack, 'wc_data'))
- self.assertEqual({'1': 'test complete'}, wc_data)
-
- server_ip = self._stack_output(stack, 'server_ip')
-
- # Check that created server is reachable
- if not self._ping_ip_address(server_ip):
- self._log_console_output(servers=[server])
- self.fail(
- "Timed out waiting for %s to become reachable" % server_ip)
-
- def test_server_signal_userdata_format_raw(self):
- self._test_server_signal(image=self.conf.minimal_image_ref)
-
- def test_server_signal_userdata_format_software_config(self):
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- self._test_server_signal(user_data_format='SOFTWARE_CONFIG',
- image=self.conf.image_ref)
diff --git a/heat_integrationtests/scenario/test_server_software_config.py b/heat_integrationtests/scenario/test_server_software_config.py
deleted file mode 100644
index f4c7da53f..000000000
--- a/heat_integrationtests/scenario/test_server_software_config.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from heatclient.common import template_utils
-import six
-
-from heat_integrationtests.scenario import scenario_base
-
-CFG1_SH = '''#!/bin/sh
-echo "Writing to /tmp/$bar"
-echo $foo > /tmp/$bar
-echo -n "The file /tmp/$bar contains `cat /tmp/$bar` for server \
-$deploy_server_id during $deploy_action" > $heat_outputs_path.result
-echo "Written to /tmp/$bar"
-echo "Output to stderr" 1>&2
-'''
-
-CFG3_PP = '''file {'barfile':
- ensure => file,
- mode => 0644,
- path => "/tmp/$::bar",
- content => "$::foo",
-}
-file {'output_result':
- ensure => file,
- path => "$::heat_outputs_path.result",
- mode => 0644,
- content => "The file /tmp/$::bar contains $::foo for server \
-$::deploy_server_id during $::deploy_action",
-}
-'''
-
-
-class SoftwareConfigIntegrationTest(scenario_base.ScenarioTestsBase):
-
- def setUp(self):
- super(SoftwareConfigIntegrationTest, self).setUp()
- if not self.conf.image_ref:
- raise self.skipException("No image configured to test")
- if not self.conf.instance_type:
- raise self.skipException("No flavor configured to test")
-
- def check_stack(self):
- sid = self.stack_identifier
- # Check that all stack resources were created
- for res in ('cfg2a', 'cfg2b', 'cfg1', 'cfg3', 'server'):
- self._wait_for_resource_status(
- sid, res, 'CREATE_COMPLETE')
-
- server_resource = self.client.resources.get(sid, 'server')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
-
- # Waiting for each deployment to contribute their
- # config to resource
- try:
- for res in ('dep2b', 'dep1', 'dep3'):
- self._wait_for_resource_status(
- sid, res, 'CREATE_IN_PROGRESS')
-
- server_metadata = self.client.resources.metadata(
- sid, 'server')
- deployments = dict((d['name'], d) for d in
- server_metadata['deployments'])
-
- for res in ('dep2a', 'dep2b', 'dep1', 'dep3'):
- self._wait_for_resource_status(
- sid, res, 'CREATE_COMPLETE')
- finally:
- # attempt to log the server console regardless of deployments
- # going to complete. This allows successful and failed boot
- # logs to be compared
- self._log_console_output(servers=[server])
-
- complete_server_metadata = self.client.resources.metadata(
- sid, 'server')
-
- # Ensure any previously available deployments haven't changed so
- # config isn't re-triggered
- complete_deployments = dict((d['name'], d) for d in
- complete_server_metadata['deployments'])
- for k, v in six.iteritems(deployments):
- self.assertEqual(v, complete_deployments[k])
-
- stack = self.client.stacks.get(sid)
-
- res1 = self._stack_output(stack, 'res1')
- self.assertEqual(
- 'The file %s contains %s for server %s during %s' % (
- '/tmp/baaaaa', 'fooooo', server_id, 'CREATE'),
- res1['result'])
- self.assertEqual(0, res1['status_code'])
- self.assertEqual('Output to stderr\n', res1['stderr'])
- self.assertGreater(len(res1['stdout']), 0)
-
- res2 = self._stack_output(stack, 'res2')
- self.assertEqual(
- 'The file %s contains %s for server %s during %s' % (
- '/tmp/cfn-init-foo', 'barrr', server_id, 'CREATE'),
- res2['result'])
- self.assertEqual(0, res2['status_code'])
- self.assertEqual('', res2['stderr'])
- self.assertEqual('', res2['stdout'])
-
- res3 = self._stack_output(stack, 'res3')
- self.assertEqual(
- 'The file %s contains %s for server %s during %s' % (
- '/tmp/ba', 'fo', server_id, 'CREATE'),
- res3['result'])
- self.assertEqual(0, res3['status_code'])
- self.assertEqual('', res3['stderr'])
- self.assertGreater(len(res1['stdout']), 0)
-
- dep1_resource = self.client.resources.get(sid, 'dep1')
- dep1_id = dep1_resource.physical_resource_id
- dep1_dep = self.client.software_deployments.get(dep1_id)
- if hasattr(dep1_dep, 'updated_time'):
- # Only check updated_time if the attribute exists.
- # This allows latest heat agent code to be tested with
- # Juno heat (which doesn't expose updated_time)
- self.assertIsNotNone(dep1_dep.updated_time)
- self.assertNotEqual(
- dep1_dep.updated_time,
- dep1_dep.creation_time)
-
- def test_server_software_config(self):
- """Check that passed files with scripts are executed on created server.
-
- The alternative scenario is the following:
- 1. Create a stack and pass files with scripts.
- 2. Check that all stack resources are created successfully.
- 3. Wait for all deployments.
- 4. Check that stack was created.
- 5. Check stack outputs.
- """
-
- parameters = {
- 'key_name': self.keypair_name,
- 'flavor': self.conf.instance_type,
- 'image': self.conf.image_ref,
- 'network': self.net['id']
- }
-
- files = {
- 'cfg1.sh': CFG1_SH,
- 'cfg3.pp': CFG3_PP
- }
-
- env_files, env = template_utils.process_environment_and_files(
- self.conf.boot_config_env)
-
- # Launch stack
- self.stack_identifier = self.launch_stack(
- template_name='test_server_software_config.yaml',
- parameters=parameters,
- files=dict(list(files.items()) + list(env_files.items())),
- expected_status=None,
- environment=env
- )
-
- # Check stack
- self.check_stack()
diff --git a/heat_integrationtests/scenario/test_volumes.py b/heat_integrationtests/scenario/test_volumes.py
deleted file mode 100644
index 47e583dfb..000000000
--- a/heat_integrationtests/scenario/test_volumes.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from cinderclient import exceptions as cinder_exceptions
-from oslo_log import log as logging
-import six
-
-from heat_integrationtests.common import exceptions
-from heat_integrationtests.scenario import scenario_base
-
-LOG = logging.getLogger(__name__)
-
-
-class VolumeBackupRestoreIntegrationTest(scenario_base.ScenarioTestsBase):
- """Class is responsible for testing of volume backup."""
-
- def setUp(self):
- super(VolumeBackupRestoreIntegrationTest, self).setUp()
- self.volume_description = 'A test volume description 123'
- self.volume_size = self.conf.volume_size
-
- def _cinder_verify(self, volume_id, expected_status='available'):
- self.assertIsNotNone(volume_id)
- volume = self.volume_client.volumes.get(volume_id)
- self.assertIsNotNone(volume)
- self.assertEqual(expected_status, volume.status)
- self.assertEqual(self.volume_size, volume.size)
- self.assertEqual(self.volume_description,
- volume.display_description)
-
- def _outputs_verify(self, stack, expected_status='available'):
- self.assertEqual(expected_status,
- self._stack_output(stack, 'status'))
- self.assertEqual(six.text_type(self.volume_size),
- self._stack_output(stack, 'size'))
- self.assertEqual(self.volume_description,
- self._stack_output(stack, 'display_description'))
-
- def check_stack(self, stack_id, parameters):
- stack = self.client.stacks.get(stack_id)
-
- # Verify with cinder that the volume exists, with matching details
- volume_id = self._stack_output(stack, 'volume_id')
- self._cinder_verify(volume_id, expected_status='in-use')
-
- # Verify the stack outputs are as expected
- self._outputs_verify(stack, expected_status='in-use')
-
- # Delete the stack and ensure a backup is created for volume_id
- # but the volume itself is gone
- self._stack_delete(stack_id)
- self.assertRaises(cinder_exceptions.NotFound,
- self.volume_client.volumes.get,
- volume_id)
-
- backups = self.volume_client.backups.list()
- self.assertIsNotNone(backups)
- backups_filtered = [b for b in backups if b.volume_id == volume_id]
- self.assertEqual(1, len(backups_filtered))
- backup = backups_filtered[0]
- self.addCleanup(self.volume_client.backups.delete, backup.id)
-
- # Now, we create another stack where the volume is created from the
- # backup created by the previous stack
- try:
- stack_identifier2 = self.launch_stack(
- template_name='test_volumes_create_from_backup.yaml',
- parameters=parameters,
- add_parameters={'backup_id': backup.id})
- stack2 = self.client.stacks.get(stack_identifier2)
- except exceptions.StackBuildErrorException:
- LOG.exception("Halting test due to bug: #1382300")
- return
-
- # Verify with cinder that the volume exists, with matching details
- volume_id2 = self._stack_output(stack2, 'volume_id')
- self._cinder_verify(volume_id2, expected_status='in-use')
-
- # Verify the stack outputs are as expected
- self._outputs_verify(stack2, expected_status='in-use')
- testfile_data = self._stack_output(stack2, 'testfile_data')
- self.assertEqual('{"instance1": "Volume Data:ateststring"}',
- testfile_data)
-
- # Delete the stack and ensure the volume is gone
- self._stack_delete(stack_identifier2)
- self.assertRaises(cinder_exceptions.NotFound,
- self.volume_client.volumes.get,
- volume_id2)
-
- def test_cinder_volume_create_backup_restore(self):
- """Ensure the 'Snapshot' deletion policy works.
-
- This requires a more complex test, but it tests several aspects
- of the heat cinder resources:
- 1. Create a volume, attach it to an instance, write some data to it
- 2. Delete the stack, with 'Snapshot' specified, creates a backup
- 3. Check the snapshot has created a volume backup
- 4. Create a new stack, where the volume is created from the backup
- 5. Verify the test data written in (1) is present in the new volume
- """
- parameters = {
- 'key_name': self.keypair_name,
- 'instance_type': self.conf.minimal_instance_type,
- 'image_id': self.conf.minimal_image_ref,
- 'volume_description': self.volume_description,
- 'timeout': self.conf.build_timeout,
- 'network': self.net['id']
- }
-
- # Launch stack
- stack_id = self.launch_stack(
- template_name='test_volumes_delete_snapshot.yaml',
- parameters=parameters,
- add_parameters={'volume_size': self.volume_size}
- )
-
- # Check stack
- self.check_stack(stack_id, parameters)
diff --git a/install.sh b/install.sh
index 93e73416e..e99951774 100755
--- a/install.sh
+++ b/install.sh
@@ -77,7 +77,6 @@ basic_configuration() {
BRIDGE_IP=127.0.0.1
iniset $target DEFAULT heat_metadata_server_url "http://${BRIDGE_IP}:8000/"
- iniset $target DEFAULT heat_watch_server_url "http://${BRIDGE_IP}:8003/"
if detect_rabbit
then
diff --git a/playbooks/devstack/functional/run.yaml b/playbooks/devstack/functional/run.yaml
index 8263f90d0..c74b858b2 100644
--- a/playbooks/devstack/functional/run.yaml
+++ b/playbooks/devstack/functional/run.yaml
@@ -29,16 +29,10 @@
export PYTHONUNBUFFERED=true
services=rabbit,tempest,mysql,dstat,key
services+=,n-api,n-api-meta,n-cpu,n-cond,n-sch,n-crt
-
- # TODO(ricolin) replace the usage of ZUUL_BRANCH
- # placement services mandatory for nova from ocata
- if [[ "stable/newton" != $ZUUL_BRANCH ]]; then
- services+=,placement-api,placement-client
- fi
-
+ services+=,placement-api,placement-client
services+=,g-api,g-reg
services+=,c-sch,c-api,c-vol,c-bak
- services+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+ services+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,q-trunk
if [ "{{ use_python3 }}" -eq 1 ] ; then
export DEVSTACK_GATE_USE_PYTHON3=True
@@ -47,10 +41,6 @@
export DEVSTACK_GATE_USE_PYTHON3=False
services+=,s-proxy,s-object,s-container,s-account
fi
- # TODO(ricolin) replace the usage of ZUUL_BRANCH
- if [[ ! "stable/newton stable/ocata" =~ $ZUUL_BRANCH ]]; then
- services+=,q-trunk
- fi
export DEVSTACK_GATE_NEUTRON=1
export DEVSTACK_GATE_TEMPEST=1
@@ -74,13 +64,17 @@
# Enable LBaaS V2 plugin
export PROJECTS="openstack/neutron-lbaas $PROJECTS"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas"
+ export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin octavia https://git.openstack.org/openstack/octavia"
# enabling lbaas plugin does not enable the lbaasv2 service, explicitly enable it
- services+=,q-lbaasv2
+ services+=,q-lbaasv2,octavia,o-cw,o-hk,o-hm,o-api
export PROJECTS="openstack/barbican $PROJECTS"
export PROJECTS="openstack/python-barbicanclient $PROJECTS"
export PROJECTS="openstack/barbican-tempest-plugin $PROJECTS"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin barbican https://git.openstack.org/openstack/barbican"
- # the lbaas v2 driver choice is in the gate pre test hook
+
+ # use heat-tempest-plugin
+ export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
+ export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/heat-tempest-plugin'"
export OVERRIDE_ENABLED_SERVICES=$services
diff --git a/playbooks/devstack/grenade/run.yaml b/playbooks/devstack/grenade/run.yaml
index e25460302..5e9a84894 100644
--- a/playbooks/devstack/grenade/run.yaml
+++ b/playbooks/devstack/grenade/run.yaml
@@ -30,6 +30,7 @@
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin heat git://git.openstack.org/openstack/heat
+ TEMPEST_PLUGINS+=' ../heat-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
@@ -40,6 +41,7 @@
set -e
set -x
export PROJECTS="openstack-dev/grenade $PROJECTS"
+ export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export PYTHONUNBUFFERED=true
export GRENADE_PLUGINRC="enable_grenade_plugin heat https://git.openstack.org/openstack/heat"
export DEVSTACK_GATE_NEUTRON=1
@@ -50,6 +52,7 @@
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
export DEVSTACK_GATE_TOPOLOGY="{{ topology }}"
+ export DEVSTACK_LOCAL_CONFIG=$'\n'"HOST_TOPOLOGY={{ topology }}"
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
diff --git a/releasenotes/notes/add-hostname-hints-security_groups-to-container-d3b69ae4b6f71fc7.yaml b/releasenotes/notes/add-hostname-hints-security_groups-to-container-d3b69ae4b6f71fc7.yaml
new file mode 100644
index 000000000..772a6ddd3
--- /dev/null
+++ b/releasenotes/notes/add-hostname-hints-security_groups-to-container-d3b69ae4b6f71fc7.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added ``hostname``, ``hints``, ``security_groups``, and ``mounts``
+ properties to Zun Container resources.
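
A minimal sketch of how the new container properties might look in a template; ``hints`` takes a map of scheduler hints, and the image, security group, and mount values here are illustrative::

    resources:
      container:
        type: OS::Zun::Container
        properties:
          image: cirros
          hostname: app-container
          security_groups: [default]
          mounts:
            # illustrative: create and mount a 1 GB volume
            - volume_size: 1
              mount_path: /data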
diff --git a/releasenotes/notes/deprecate-threshold-alarm-5738f5ab8aebfd20.yaml b/releasenotes/notes/deprecate-threshold-alarm-5738f5ab8aebfd20.yaml
new file mode 100644
index 000000000..62901d418
--- /dev/null
+++ b/releasenotes/notes/deprecate-threshold-alarm-5738f5ab8aebfd20.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - The threshold alarm, which uses the ceilometer API, has been deprecated
+ in aodh since Ocata. Please use
+ ``OS::Aodh::GnocchiAggregationByResourcesAlarm`` in place of
+ ``OS::Aodh::Alarm``.
\ No newline at end of file
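
A minimal sketch of the replacement resource, assuming a Gnocchi metric aggregated over instances; the query, threshold values, and the referenced scaleup_policy resource are illustrative, not taken from this commit::

    resources:
      cpu_alarm_high:
        type: OS::Aodh::GnocchiAggregationByResourcesAlarm
        properties:
          metric: cpu_util
          aggregation_method: mean
          granularity: 300
          evaluation_periods: 1
          threshold: 80
          comparison_operator: gt
          resource_type: instance
          # illustrative query matching servers tagged with a group id
          query: '{"=": {"server_group": "my-asg-id"}}'
          alarm_actions:
            - {get_attr: [scaleup_policy, signal_url]}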
diff --git a/releasenotes/notes/drop-watch-rule-watch-data-tables-9ecb8da574611236.yaml b/releasenotes/notes/drop-watch-rule-watch-data-tables-9ecb8da574611236.yaml
new file mode 100644
index 000000000..6d9fd1f45
--- /dev/null
+++ b/releasenotes/notes/drop-watch-rule-watch-data-tables-9ecb8da574611236.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The database upgrade for the Heat Queens release drops the 'watch_rule'
+ and 'watch_data' tables from the heat database.
diff --git a/releasenotes/notes/force-delete-nova-instance-6ed5d7fbd5b6f5fe.yaml b/releasenotes/notes/force-delete-nova-instance-6ed5d7fbd5b6f5fe.yaml
new file mode 100644
index 000000000..42c2c751e
--- /dev/null
+++ b/releasenotes/notes/force-delete-nova-instance-6ed5d7fbd5b6f5fe.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - Nova instances are now force-deleted. If a resource is associated with
+ a nova instance in 'SOFT_DELETED' status, the resource cannot be
+ deleted while nova's 'reclaim_instance_interval' is configured.
+ Force-deleting the nova instance ensures that all resources
+ associated with the instance are processed properly.
diff --git a/releasenotes/notes/hidden-heat-harestarter-resource-a123479c317886a3.yaml b/releasenotes/notes/hidden-heat-harestarter-resource-a123479c317886a3.yaml
new file mode 100644
index 000000000..2fb5f20f6
--- /dev/null
+++ b/releasenotes/notes/hidden-heat-harestarter-resource-a123479c317886a3.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ The ``OS::Heat::HARestarter`` resource type is no longer supported. This
+ resource type is now hidden from the documentation. HARestarter resources
+ in stacks, including pre-existing ones, are now only placeholders and will
+ no longer do anything. The recommended alternative is to mark a resource
+ unhealthy and then do a stack update to replace it. This still correctly
+ manages dependencies but, unlike HARestarter, also avoids replacing
+ dependent resources unnecessarily. An example of this technique can be
+ seen in the autohealing sample templates at
+ https://git.openstack.org/cgit/openstack/heat-templates/tree/hot/autohealing
diff --git a/releasenotes/notes/octavia-resources-0a25720e16dfe55d.yaml b/releasenotes/notes/octavia-resources-0a25720e16dfe55d.yaml
new file mode 100644
index 000000000..54cc17cc4
--- /dev/null
+++ b/releasenotes/notes/octavia-resources-0a25720e16dfe55d.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - Adds new resources for the Octavia LBaaS service.
+ - New resource ``OS::Octavia::LoadBalancer`` is added to create and
+ manage Load Balancers which allow traffic to be directed between servers.
+ - New resource ``OS::Octavia::Listener`` is added to create and
+ manage Listeners which represent a listening endpoint for the Load
+ Balancer.
+ - New resource ``OS::Octavia::Pool`` is added to create and
+ manage Pools which represent a group of nodes. Pools define the subnet
+ where nodes reside, the balancing algorithm, and the nodes themselves.
+ - New resource ``OS::Octavia::PoolMember`` is added to create and
+ manage Pool members which represent a single backend node.
+ - New resource ``OS::Octavia::HealthMonitor`` is added to create and
+ manage Health Monitors which watch status of the Load Balanced servers.
+ - New resource ``OS::Octavia::L7Policy`` is added to create and
+ manage L7 Policies.
+ - New resource ``OS::Octavia::L7Rule`` is added to create and
+ manage L7 Rules.
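
A minimal sketch wiring the new resources together; the subnet name, ports, algorithm, and monitor timings are illustrative::

    resources:
      lb:
        type: OS::Octavia::LoadBalancer
        properties:
          vip_subnet: private-subnet
      listener:
        type: OS::Octavia::Listener
        properties:
          loadbalancer: {get_resource: lb}
          protocol: HTTP
          protocol_port: 80
      pool:
        type: OS::Octavia::Pool
        properties:
          listener: {get_resource: listener}
          lb_algorithm: ROUND_ROBIN
          protocol: HTTP
      monitor:
        type: OS::Octavia::HealthMonitor
        properties:
          pool: {get_resource: pool}
          type: HTTP
          delay: 5
          max_retries: 3
          timeout: 5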
diff --git a/releasenotes/notes/policy-in-code-124372f6cdb0a497.yaml b/releasenotes/notes/policy-in-code-124372f6cdb0a497.yaml
new file mode 100644
index 000000000..5f4700ec3
--- /dev/null
+++ b/releasenotes/notes/policy-in-code-124372f6cdb0a497.yaml
@@ -0,0 +1,15 @@
+---
+features:
+ - |
+ Heat now supports policy in code, which means that if you have not
+ modified any policy rules, you no longer need to add rules to the
+ `policy.yaml` or `policy.json` file; heat now keeps all default policies
+ under `heat/policies`. You can still generate and modify a `policy.yaml`
+ file, which will override the in-code policy rules for any rules that
+ appear in the file.
+upgrade:
+ - |
+ The default policy.json file has been removed, as the default policies
+ are now generated in code. Please be aware of this if you were using
+ that file in your environment. You can still generate a `policy.yaml`
+ file if your environment requires one.
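
A minimal sketch of such an override file; the rule names are examples from heat's policy namespace and the values shown are illustrative::

    # policy.yaml -- only the rules listed here shadow the in-code
    # defaults; everything else keeps its generated default.
    "stacks:global_index": "role:admin"
    "stacks:lookup": "rule:deny_stack_user"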
diff --git a/releasenotes/notes/project-tags-orchestration-If9125519e35f9f95ea8343cb07c377de9ccf5edf.yaml b/releasenotes/notes/project-tags-orchestration-If9125519e35f9f95ea8343cb07c377de9ccf5edf.yaml
new file mode 100644
index 000000000..e1ed9bf22
--- /dev/null
+++ b/releasenotes/notes/project-tags-orchestration-If9125519e35f9f95ea8343cb07c377de9ccf5edf.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add a `tags` parameter for creating and updating keystone projects.
+ The defined comma-delimited list of tags will be inserted into newly
+ created or updated projects.
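
A minimal sketch, assuming the tags are exposed as a list property on the keystone project resource; the project name and tag values are illustrative::

    resources:
      project:
        type: OS::Keystone::Project
        properties:
          name: demo-project
          tags: [dev, orchestration]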
diff --git a/releasenotes/notes/remove-SSLMiddleware-2f15049af559f26a.yaml b/releasenotes/notes/remove-SSLMiddleware-2f15049af559f26a.yaml
new file mode 100644
index 000000000..d773b14b2
--- /dev/null
+++ b/releasenotes/notes/remove-SSLMiddleware-2f15049af559f26a.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ The SSL middleware ``heat.api.middleware.ssl:SSLMiddleware``, which had
+ been deprecated since 6.0.0, has now been removed. Check your paste
+ config and ensure it has been replaced by
+ ``oslo_middleware.http_proxy_to_wsgi`` instead.
diff --git a/releasenotes/notes/remove-cloudwatch-api-149403251da97b41.yaml b/releasenotes/notes/remove-cloudwatch-api-149403251da97b41.yaml
new file mode 100644
index 000000000..d8fac64b3
--- /dev/null
+++ b/releasenotes/notes/remove-cloudwatch-api-149403251da97b41.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ The AWS-compatible CloudWatch API, which had long been deprecated, has
+ finally been removed. OpenStack deployments, packagers, and deployment
+ projects which deploy/package CloudWatch should take appropriate
+ action to remove support.
diff --git a/releasenotes/notes/remove-heat-resourcetype-constraint-b679618a149fc04e.yaml b/releasenotes/notes/remove-heat-resourcetype-constraint-b679618a149fc04e.yaml
new file mode 100644
index 000000000..90d08d0c2
--- /dev/null
+++ b/releasenotes/notes/remove-heat-resourcetype-constraint-b679618a149fc04e.yaml
@@ -0,0 +1,4 @@
+---
+deprecations:
+ - The heat.resource_type custom constraint has been removed. This constraint
+ never actually worked.
diff --git a/releasenotes/notes/resource_group_removal_policies_mode-d489e0cc49942e2a.yaml b/releasenotes/notes/resource_group_removal_policies_mode-d489e0cc49942e2a.yaml
new file mode 100644
index 000000000..409065c70
--- /dev/null
+++ b/releasenotes/notes/resource_group_removal_policies_mode-d489e0cc49942e2a.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ OS::Heat::ResourceGroup now supports a removal_policies_mode property.
+ This can be used to select different behavior on update, where you may
+ wish to overwrite, rather than append to, the current removal policy.
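
A minimal sketch, assuming the property accepts ``append`` (the previous behavior) and ``update`` (overwrite); the group definition is illustrative::

    resources:
      group:
        type: OS::Heat::ResourceGroup
        properties:
          count: 3
          # replace, rather than extend, any existing removal policy
          removal_policies: [{resource_list: ['0']}]
          removal_policies_mode: update
          resource_def:
            type: OS::Heat::RandomString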
diff --git a/releasenotes/notes/set-networks-for-trove-cluster-b997a049eedbad17.yaml b/releasenotes/notes/set-networks-for-trove-cluster-b997a049eedbad17.yaml
new file mode 100644
index 000000000..3fafbc9d9
--- /dev/null
+++ b/releasenotes/notes/set-networks-for-trove-cluster-b997a049eedbad17.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Allow setting the networks of instances for the OS::Trove::Cluster resource.
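
A minimal sketch, assuming each entry in ``instances`` now accepts a server-style ``networks`` list; the datastore, flavor, and parameter names are illustrative::

    resources:
      db_cluster:
        type: OS::Trove::Cluster
        properties:
          name: demo-cluster
          datastore_type: mongodb
          datastore_version: "3.2"
          instances:
            - flavor: m1.medium
              volume_size: 1
              networks:
                - network: {get_param: net}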
diff --git a/releasenotes/notes/sync-queens-releasenote-13f68851f7201e37.yaml b/releasenotes/notes/sync-queens-releasenote-13f68851f7201e37.yaml
new file mode 100644
index 000000000..bae19a3cf
--- /dev/null
+++ b/releasenotes/notes/sync-queens-releasenote-13f68851f7201e37.yaml
@@ -0,0 +1,21 @@
+---
+prelude: |
+ Note that Heat is compatible with OpenStack Identity federation, even when
+ using Keystone trusts. It should work after you enable Federation and build
+ the `auto-provisioning map`_ with the heat service user in Keystone.
+ Auto-provisioning has been available in Keystone since the Ocata release.
+
+ .. _auto-provisioning map: https://docs.openstack.org/keystone/latest/advanced-topics/federation/federated_identity.html#auto-provisioning
+other:
+ - |
+ The Heat plugin in Horizon has been replaced with a new stand-alone
+ Horizon plugin, heat-dashboard. You can see more detail in the
+ heat-dashboard repository
+ (https://git.openstack.org/cgit/openstack/heat-dashboard).
+ - |
+ The old Heat Tempest plugin ``heat_tests`` has been removed and replaced
+ by a separate Tempest plugin named ``heat``, in the heat-tempest-plugin
+ repository (https://git.openstack.org/cgit/openstack/heat-tempest-plugin).
+ Functional tests that are appropriate for the Tempest environment have been
+ migrated to the new plugin. Other functional tests remain behind in the
+ heat repository.
diff --git a/releasenotes/notes/system-random-string-38a14ae2cb6f4a24.yaml b/releasenotes/notes/system-random-string-38a14ae2cb6f4a24.yaml
new file mode 100644
index 000000000..713317c8f
--- /dev/null
+++ b/releasenotes/notes/system-random-string-38a14ae2cb6f4a24.yaml
@@ -0,0 +1,6 @@
+---
+security:
+ - |
+ Heat no longer uses the standard Python RNG when generating values for
+ the OS::Heat::RandomString resource, and instead relies on the system's
+ RNG.
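
Template usage is unchanged; a minimal, illustrative sketch of the affected resource::

    resources:
      secret:
        type: OS::Heat::RandomString
        properties:
          length: 32
    outputs:
      password:
        value: {get_attr: [secret, value]}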
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index da91e25fa..d0db3f0d7 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -63,16 +63,9 @@ master_doc = 'index'
project = u'Heat Release Notes'
copyright = u'2015, Heat Developers'
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-from heat.version import version_info as heat_version
-# The full version, including alpha/beta/rc tags.
-release = heat_version.version_string_with_vcs()
-# The short X.Y version.
-version = heat_version.canonical_version_string()
+# Release notes are version independent, no need to set version and release
+release = ''
+version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 0a316ea9b..34e17cbe3 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ queens
pike
ocata
newton
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index c7b0a4250..0daba68ed 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,17 +1,18 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
+# Andi Chandler <andi@gowling.com>, 2018. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: Heat Release Notes 10.0.0\n"
+"Project-Id-Version: Heat Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2017-11-10 13:37+0000\n"
+"POT-Creation-Date: 2018-02-28 16:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2017-11-11 09:49+0000\n"
+"PO-Revision-Date: 2018-02-18 12:07+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
-"Language: en-GB\n"
-"X-Generator: Zanata 3.9.6\n"
+"Language: en_GB\n"
+"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid ""
@@ -21,12 +22,6 @@ msgstr ""
"'CEPHFS' can be used as a share protocol when using OS::Manila::Share "
"resource."
-msgid "10.0.0.0b1"
-msgstr "10.0.0.0b1"
-
-msgid "10.0.0.0b1-18"
-msgstr "10.0.0.0b1-18"
-
msgid "5.0.1"
msgstr "5.0.1"
@@ -42,6 +37,9 @@ msgstr "8.0.0"
msgid "9.0.0"
msgstr "9.0.0"
+msgid "9.0.3"
+msgstr "9.0.3"
+
msgid ""
"A new 'parameter_merge_strategies' section can be added to the environment "
"file, where 'default' and/or parameter specific merge strategies can be "
@@ -202,6 +200,13 @@ msgstr ""
"updated. This will keep management rights stay externally."
msgid ""
+"Add `tags` parameter for create and update keystone projects. Defined comma "
+"deliniated list will insert tags into newly created or updated projects."
+msgstr ""
+"Add `tags` parameter for create and update Keystone projects. Defined comma "
+"delineated list will insert tags into newly created or updated projects."
+
+msgid ""
"Add `template_dir` to config. Normally heat has template directory `/etc/"
"heat/templates`. This change makes it more official. In the future, it is "
"possible to implement features like access templates directly from global "
@@ -253,6 +258,13 @@ msgstr ""
"notification resource in Heat with a default interval value of 60."
msgid ""
+"Added ``hostname``, ``hints``, ``security_groups``, and ``mounts`` "
+"properties to Zun Container resources."
+msgstr ""
+"Added ``hostname``, ``hints``, ``security_groups``, and ``mounts`` "
+"properties to Zun Container resources."
+
+msgid ""
"Added a new ``event-sinks`` element to the environment which allows "
"specifying a target where events from the stack are sent. It supports the "
"``zaqar-queue`` element for now."
@@ -262,6 +274,13 @@ msgstr ""
"``zaqar-queue`` element for now."
msgid ""
+"Added a new schema property tags, to parameters, to categorize parameters "
+"based on features."
+msgstr ""
+"Added a new schema property tags, to parameters, to categorise parameters "
+"based on features."
+
+msgid ""
"Added new API calls for showing and listing stack outputs ``/stack/outputs`` "
"and ``/stack/outputs/output_key``."
msgstr ""
@@ -353,6 +372,9 @@ msgstr ""
"prior upper bound (53) and can be lowered by users (if they need to, for "
"example due to LDAP or other internal name limit restrictions)."
+msgid "Adds new resources for octavia lbaas service."
+msgstr "Adds new resources for Octavia LBaaS service."
+
msgid ""
"Adds optional section ``condition`` for resource and output definitions. "
"Condition name defined in ``conditions`` and condition functions can be "
@@ -402,6 +424,9 @@ msgstr ""
"Volume resources with ``deletion_policy`` set to ``Snapshot`` when there is "
"no Cinder backup service available."
+msgid "Allow to set networks of instances for OS::Trove::Cluster resource."
+msgstr "Allow to set networks of instances for OS::Trove::Cluster resource."
+
msgid "Allow to set or update the tags for OS::Neutron::Net resource."
msgstr "Allow to set or update the tags for OS::Neutron::Net resource."
@@ -433,6 +458,17 @@ msgstr ""
"Custom constraints for all sahara resources added - sahara.cluster, sahara."
"cluster_template, sahara.data_source, sahara.job_binary, sahara.job_type."
+msgid ""
+"Default policy.json file is now removed as we now generate the default "
+"policies in code. Please be aware that when using that file in your "
+"environment. You still can generate a `policy.yaml` file if that's required "
+"in your environment."
+msgstr ""
+"Default policy.json file is now removed as we now generate the default "
+"policies in code. Please be aware that when using that file in your "
+"environment. You still can generate a `policy.yaml` file if that's required "
+"in your environment."
+
msgid "Deprecation Notes"
msgstr "Deprecation Notes"
@@ -450,6 +486,19 @@ msgstr ""
"Designate v2 resource plugins OS::Designate::Zone and OS::Designate::"
"RecordSet are newly added."
+msgid ""
+"Force delete the nova instance. If a resource is related with a nova "
+"instance which is in 'SOFT_DELETED' status, the resource can't be deleted, "
+"when nova config 'reclaim_instance_interval'. so, force-delete the nova "
+"instance, and then all the resources are related with the instance would be "
+"processed properly."
+msgstr ""
+"Force delete the Nova instance. If a resource is related with a Nova "
+"instance which is in 'SOFT_DELETED' status, the resource can't be deleted, "
+"when Nova config 'reclaim_instance_interval'. so, force-delete the Nova "
+"instance, and then all the resources are related with the instance would be "
+"processed properly."
+
msgid "Heat Release Notes"
msgstr "Heat Release Notes"
@@ -465,6 +514,28 @@ msgstr ""
"etherpad.openstack.org/p/pike-ptg-cross-project-federation`."
msgid ""
+"Heat no longer uses standard Python RNG when generating values for OS::Heat::"
+"RandomString resource, and instead relies on system's RNG for that."
+msgstr ""
+"Heat no longer uses standard Python RNG when generating values for OS::Heat::"
+"RandomString resource, and instead relies on system's RNG for that."
+
+msgid ""
+"Heat now support policy in code, which means if you didn't modify any of "
+"policy rules, you won't need to add rules in the `policy.yaml` or `policy."
+"json` file. Because from now, heat keeps all default policies under `heat/"
+"policies`. You can still generate and modify a `policy.yaml` file which will "
+"override policy rules in code if those rules appear in the `policy.yaml` "
+"file."
+msgstr ""
+"Heat now support policy in code, which means if you didn't modify any of "
+"policy rules, you won't need to add rules in the `policy.yaml` or `policy."
+"json` file. Because from now, heat keeps all default policies under `heat/"
+"policies`. You can still generate and modify a `policy.yaml` file which will "
+"override policy rules in code if those rules appear in the `policy.yaml` "
+"file."
+
+msgid ""
"Hidden Designate resource plugins ``OS::Designate::Domain`` and ``OS::"
"Designate::Record``. To use ``OS::Designate::Zone`` and ``OS::Designate::"
"RecordSet`` instead."
@@ -575,17 +646,6 @@ msgstr ""
"particular."
msgid ""
-"New ``OS::Zaqar::Subscription`` and ``OS::Zaqar::MistralTrigger`` resource "
-"types allow users to attach to Zaqar queues (respectively) notifications in "
-"general, and notifications that trigger Mistral workflow executions in "
-"particular."
-msgstr ""
-"New ``OS::Zaqar::Subscription`` and ``OS::Zaqar::MistralTrigger`` resource "
-"types allow users to attach to Zaqar queues (respectively) notifications in "
-"general, and notifications that trigger Mistral workflow executions in "
-"particular."
-
-msgid ""
"New config section ``volumes`` with new config option "
"``[volumes]backups_enabled`` (defaults to ``True``). Operators that do not "
"have Cinder backup service deployed in their cloud are encouraged to set "
@@ -673,6 +733,55 @@ msgstr ""
"Compute service quotas for a specific project."
msgid ""
+"New resource ``OS::Octavia::HealthMonitor`` is added to create and manage "
+"Health Monitors which watch status of the Load Balanced servers."
+msgstr ""
+"New resource ``OS::Octavia::HealthMonitor`` is added to create and manage "
+"Health Monitors which watch status of the Load Balanced servers."
+
+msgid ""
+"New resource ``OS::Octavia::L7Policy`` is added to create and manage L7 "
+"Policies."
+msgstr ""
+"New resource ``OS::Octavia::L7Policy`` is added to create and manage L7 "
+"Policies."
+
+msgid ""
+"New resource ``OS::Octavia::L7Rule`` is added to create and manage L7 Rules."
+msgstr ""
+"New resource ``OS::Octavia::L7Rule`` is added to create and manage L7 Rules."
+
+msgid ""
+"New resource ``OS::Octavia::Listener`` is added to create and manage "
+"Listeners which represent a listening endpoint for the Load Balancer."
+msgstr ""
+"New resource ``OS::Octavia::Listener`` is added to create and manage "
+"Listeners which represent a listening endpoint for the Load Balancer."
+
+msgid ""
+"New resource ``OS::Octavia::LoadBalancer`` is added to create and manage "
+"Load Balancers which allow traffic to be directed between servers."
+msgstr ""
+"New resource ``OS::Octavia::LoadBalancer`` is added to create and manage "
+"Load Balancers which allow traffic to be directed between servers."
+
+msgid ""
+"New resource ``OS::Octavia::PoolMember`` is added to create and manage Pool "
+"members which represent a single backend node."
+msgstr ""
+"New resource ``OS::Octavia::PoolMember`` is added to create and manage Pool "
+"members which represent a single backend node."
+
+msgid ""
+"New resource ``OS::Octavia::Pool`` is added to create and manage Pools which "
+"represent a group of nodes. Pools define the subnet where nodes reside, the "
+"balancing algorithm, and the nodes themselves."
+msgstr ""
+"New resource ``OS::Octavia::Pool`` is added to create and manage Pools which "
+"represent a group of nodes. Pools define the subnet where nodes reside, the "
+"balancing algorithm, and the nodes themselves."
+
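Taken together, these resources map onto Octavia's object model: a load balancer fronts listeners, listeners feed pools, and pools contain members checked by health monitors. A minimal, hypothetical HOT sketch (resource names and property values are illustrative, not taken from this release):

  heat_template_version: queens

  resources:
    lb:
      type: OS::Octavia::LoadBalancer
      properties:
        vip_subnet: private-subnet        # assumed subnet name
    listener:
      type: OS::Octavia::Listener
      properties:
        loadbalancer: { get_resource: lb }
        protocol: HTTP
        protocol_port: 80
    pool:
      type: OS::Octavia::Pool
      properties:
        listener: { get_resource: listener }
        lb_algorithm: ROUND_ROBIN
        protocol: HTTP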
+msgid ""
"New resource ``OS::Senlin::Cluster`` is added to create a cluster in senlin. "
"A cluster is a group of homogeneous nodes."
msgstr ""
@@ -722,6 +831,17 @@ msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid ""
+"Note that Heat is compatible with OpenStack Identity federation, even when "
+"using Keystone trusts. It should work after you enable Federation and build "
+"the `auto-provisioning map`_ with the heat service user in Keystone. Auto-"
+"provisioning has been available in Keystone since the Ocata release."
+msgstr ""
+"Note that Heat is compatible with OpenStack Identity federation, even when "
+"using Keystone trusts. It should work after you enable Federation and build "
+"the `auto-provisioning map`_ with the heat service user in Keystone. Auto-"
+"provisioning has been available in Keystone since the Ocata release."
+
+msgid ""
"Now heat keystone user name charaters limit increased from 64 to 255. Any "
"extra charaters will lost when truncate the name to the last 255 charaters."
msgstr ""
@@ -761,6 +881,15 @@ msgstr ""
"is created or updated as part of stack."
msgid ""
+"OS::Heat::ResourceGroup now supports a removal_policies_mode property. This "
+"can be used to optionally select different behavior on update where you may "
+"wish to overwrite vs append to the current policy."
+msgstr ""
+"OS::Heat::ResourceGroup now supports a removal_policies_mode property. This "
+"can be used to optionally select different behaviour on update where you may "
+"wish to overwrite vs append to the current policy."
+
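A hedged template fragment illustrating the property; ``update`` (as opposed to the default ``append``) replaces the stored policy rather than extending it, and the resource names here are illustrative:

  resources:
    group:
      type: OS::Heat::ResourceGroup
      properties:
        count: 3
        removal_policies: [{resource_list: ['0']}]
        removal_policies_mode: update  # overwrite the current policy on update
        resource_def:
          type: OS::Heat::RandomString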
+msgid ""
"OS::Magnum::Cluster resource plugin added to support magnum cluster feature, "
"which is provided by magnum ``cluster`` API."
msgstr ""
@@ -876,6 +1005,9 @@ msgstr ""
"meant that fetching all nested events required an inefficient recursive "
"client-side implementation."
+msgid "Queens Series Release Notes"
+msgstr "Queens Series Release Notes"
+
msgid ""
"Resource ``OS::Neutron::Net`` now supports ``l2_adjacency`` atribute on "
"whether L2 connectivity is available across the network or not."
@@ -927,6 +1059,9 @@ msgstr ""
"``qos_policy`` optional property, that will associate with QoS policy to "
"offer different service levels based on the policy rules."
+msgid "Security Issues"
+msgstr "Security Issues"
+
msgid ""
"Since Aodh drop support for combination alarm, therefore OS::Aodh::"
"CombinationAlarm is now mark as hidden resource with directly inheriting "
@@ -1005,6 +1140,15 @@ msgstr ""
"function."
msgid ""
+"The AWS compatible CloudWatch API, deprecated since long has been finally "
+"removed. OpenStack deployments, packagers, and deployment projects which "
+"deploy/package CloudWatch should take appropriate action to remove support."
+msgstr ""
+"The AWS compatible CloudWatch API, deprecated since long has been finally "
+"removed. OpenStack deployments, packagers, and deployment projects which "
+"deploy/package CloudWatch should take appropriate action to remove support."
+
+msgid ""
"The AWS::EC2::EIP domain is always assumed to be 'vpc', since nova-network "
"is not supported in OpenStack any longer."
msgstr ""
@@ -1012,6 +1156,15 @@ msgstr ""
"is not supported in OpenStack any longer."
msgid ""
+"The Heat plugin in Horizon has been replaced with a new stand-alone Horizon "
+"plugin, heat-dashboard. You can see more detail in the heat-dashboard "
+"repository (https://git.openstack.org/cgit/openstack/heat-dashboard)."
+msgstr ""
+"The Heat plugin in Horizon has been replaced with a new stand-alone Horizon "
+"plugin, Heat-dashboard. You can see more detail in the Heat-dashboard "
+"repository (https://git.openstack.org/cgit/openstack/heat-dashboard)."
+
+msgid ""
"The OS::Nova::Server now supports a new property user_data_update_policy, "
"which may be set to either 'REPLACE' (default) or 'IGNORE' if you wish to "
"allow user_data updates to be ignored on stack update. This is useful when "
@@ -1034,6 +1187,38 @@ msgstr ""
"escaping and IPv6 addresses."
msgid ""
+"The SSL middleware ``heat.api.middleware.ssl:SSLMiddleware`` that has been "
+"deprecated since 6.0.0 has now been removed, check your paste config and "
+"ensure it has been replaced by ``oslo_middleware.http_proxy_to_wsgi`` "
+"instead."
+msgstr ""
+"The SSL middleware ``heat.api.middleware.ssl:SSLMiddleware`` that has been "
+"deprecated since 6.0.0 has now been removed, check your paste config and "
+"ensure it has been replaced by ``oslo_middleware.http_proxy_to_wsgi`` "
+"instead."
+
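For reference, a sketch of the replacement stanza in ``api-paste.ini``, using the standard oslo.middleware paste factory:

  [filter:http_proxy_to_wsgi]
  paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory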
+msgid ""
+"The ``OS::Heat::HARestarter`` resource type is no longer supported. This "
+"resource type is now hidden from the documentation. HARestarter resources in "
+"stacks, including pre-existing ones, are now only placeholders and will no "
+"longer do anything. The recommended alternative is to mark a resource "
+"unhealthy and then do a stack update to replace it. This still correctly "
+"manages dependencies but, unlike HARestarter, also avoid replacing dependent "
+"resources unnecessarily. An example of this technique can be seen in the "
+"autohealing sample templates at https://git.openstack.org/cgit/openstack/"
+"heat-templates/tree/hot/autohealing"
+msgstr ""
+"The ``OS::Heat::HARestarter`` resource type is no longer supported. This "
+"resource type is now hidden from the documentation. HARestarter resources in "
+"stacks, including pre-existing ones, are now only placeholders and will no "
+"longer do anything. The recommended alternative is to mark a resource "
+"unhealthy and then do a stack update to replace it. This still correctly "
+"manages dependencies but, unlike HARestarter, also avoid replacing dependent "
+"resources unnecessarily. An example of this technique can be seen in the "
+"autohealing sample templates at https://git.openstack.org/cgit/openstack/"
+"heat-templates/tree/hot/autohealing"
+
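A sketch of the recommended replacement workflow, with hypothetical stack and resource names:

  $ openstack stack resource mark unhealthy my_stack my_server 'instance unreachable'
  $ openstack stack update --existing my_stack

The update replaces the unhealthy resource while leaving healthy dependent resources untouched.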
+msgid ""
"The ``resource mark unhealthy`` command now accepts either a logical "
"resource name (as it did previously) or a physical resource ID to identify "
"the resource to be marked unhealthy."
@@ -1043,6 +1228,13 @@ msgstr ""
"the resource to be marked unhealthy."
msgid ""
+"The database upgrade for Heat Queens release drops 'watch_rule' and "
+"'watch_data' tables from the heat database."
+msgstr ""
+"The database upgrade for Heat Queens release drops 'watch_rule' and "
+"'watch_data' tables from the heat database."
+
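The tables are dropped by the normal schema migration, so no separate action is needed beyond the standard upgrade step, sketched here assuming heat-manage is on the path:

  $ heat-manage db_sync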
+msgid ""
"The event list GET REST API call now has a different behaviour when the "
"'nested_depth' parameter is set to an integer greater than zero. The "
"response will contain all events down to the requested nested depth."
@@ -1052,6 +1244,13 @@ msgstr ""
"response will contain all events down to the requested nested depth."
msgid ""
+"The heat.resource_type custom constraint has been removed. This constraint "
+"never actually worked."
+msgstr ""
+"The heat.resource_type custom constraint has been removed. This constraint "
+"never actually worked."
+
+msgid ""
"The list_concat function was added, which concats several lists using "
"python's extend function."
msgstr ""
@@ -1067,10 +1266,43 @@ msgstr ""
"function ``list_concat`` to concat several lists using python's extend "
"function and make sure without repeating items."
+msgid ""
+"The old Heat Tempest plugin ``heat_tests`` has been removed and replaced by "
+"a separate Tempest plugin named ``heat``, in the heat-tempest-plugin "
+"repository (https://git.openstack.org/cgit/openstack/heat-tempest-plugin). "
+"Functional tests that are appropriate for the Tempest environment have been "
+"migrated to the new plugin. Other functional tests remain behind in the heat "
+"repository."
+msgstr ""
+"The old Heat Tempest plugin ``heat_tests`` has been removed and replaced by "
+"a separate Tempest plugin named ``heat``, in the heat-tempest-plugin "
+"repository (https://git.openstack.org/cgit/openstack/heat-tempest-plugin). "
+"Functional tests that are appropriate for the Tempest environment have been "
+"migrated to the new plugin. Other functional tests remain behind in the heat "
+"repository."
+
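A sketch of running the migrated tests, assuming a configured Tempest environment and the plugin installed from its repository:

  $ pip install git+https://git.openstack.org/openstack/heat-tempest-plugin
  $ tempest run --regex heat_tempest_plugin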
+msgid ""
+"The template validate API call now returns the Environment calculated by "
+"heat - this enables preview of the merged environment when using "
+"parameter_merge_strategy prior to creating the stack"
+msgstr ""
+"The template validate API call now returns the Environment calculated by "
+"heat - this enables preview of the merged environment when using "
+"parameter_merge_strategy prior to creating the stack"
+
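For example, the merged result of several environment files can be previewed before the stack is ever created (file names are illustrative):

  $ openstack orchestration template validate --template template.yaml \
      --environment common.yaml --environment override.yaml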
msgid "This feature only supports templates with version over `2016-10-14`."
msgstr "This feature only supports templates with version over `2016-10-14`."
msgid ""
+"Threshold alarm which uses ceilometer API is deprecated in aodh since Ocata. "
+"Please use ``OS::Aodh::GnocchiAggregationByResourcesAlarm`` in place of "
+"``OS::Aodh::Alarm``."
+msgstr ""
+"Threshold alarm which uses the Ceilometer API is deprecated in Aodh since "
+"Ocata. Please use ``OS::Aodh::GnocchiAggregationByResourcesAlarm`` in place "
+"of ``OS::Aodh::Alarm``."
+
+msgid ""
"Two new policies soft-affinity and soft-anti-affinity have been supported "
"for the OS::Nova::ServerGroup resource."
msgstr ""
diff --git a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
index 7ce057f48..07d5d45a4 100644
--- a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
@@ -1,17 +1,17 @@
# minwook-shin <minwook0106@gmail.com>, 2017. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: Heat Release Notes 10.0.0\n"
+"Project-Id-Version: Heat Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2017-10-06 21:37+0000\n"
+"POT-Creation-Date: 2018-02-28 16:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-08-05 01:49+0000\n"
"Last-Translator: minwook-shin <minwook0106@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
-"Language: ko-KR\n"
-"X-Generator: Zanata 3.9.6\n"
+"Language: ko_KR\n"
+"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
msgid "5.0.1"
diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst
new file mode 100644
index 000000000..36ac6160c
--- /dev/null
+++ b/releasenotes/source/queens.rst
@@ -0,0 +1,6 @@
+===================================
+ Queens Series Release Notes
+===================================
+
+.. release-notes::
+ :branch: stable/queens
diff --git a/requirements.txt b/requirements.txt
index 75a46bd86..c803d65b2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,54 +6,52 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
Babel!=2.4.0,>=2.3.4 # BSD
croniter>=0.3.4 # MIT License
cryptography!=2.0,>=1.9 # BSD/Apache-2.0
-debtcollector>=1.2.0 # Apache-2.0
eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT
-keystoneauth1>=3.2.0 # Apache-2.0
+keystoneauth1>=3.4.0 # Apache-2.0
keystonemiddleware>=4.17.0 # Apache-2.0
lxml!=3.7.0,>=3.4.1 # BSD
netaddr>=0.7.18 # BSD
-openstacksdk>=0.9.19 # Apache-2.0
+openstacksdk>=0.11.2 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0
-oslo.config>=4.6.0 # Apache-2.0
-oslo.concurrency>=3.20.0 # Apache-2.0
+oslo.config>=5.1.0 # Apache-2.0
+oslo.concurrency>=3.25.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.db>=4.27.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
-oslo.log>=3.30.0 # Apache-2.0
+oslo.log>=3.36.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
-oslo.policy>=1.23.0 # Apache-2.0
+oslo.policy>=1.30.0 # Apache-2.0
oslo.reports>=1.18.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.service>=1.24.0 # Apache-2.0
-oslo.utils>=3.31.0 # Apache-2.0
+oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
+oslo.utils>=3.33.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
-oslo.versionedobjects>=1.28.0 # Apache-2.0
+oslo.versionedobjects>=1.31.2 # Apache-2.0
PasteDeploy>=1.5.0 # MIT
aodhclient>=0.9.0 # Apache-2.0
python-barbicanclient!=4.5.0,!=4.5.1,>=4.0.0 # Apache-2.0
python-ceilometerclient>=2.5.0 # Apache-2.0
-python-cinderclient>=3.2.0 # Apache-2.0
+python-cinderclient>=3.3.0 # Apache-2.0
python-designateclient>=2.7.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
-gnocchiclient>=3.3.1 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
python-magnumclient>=2.1.0 # Apache-2.0
python-manilaclient>=1.16.0 # Apache-2.0
-python-mistralclient>=3.1.0 # Apache-2.0
+python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0
python-monascaclient>=1.7.0 # Apache-2.0
-python-neutronclient>=6.3.0 # Apache-2.0
+python-neutronclient>=6.7.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0
+python-octaviaclient>=1.3.0 # Apache-2.0
python-openstackclient>=3.12.0 # Apache-2.0
python-saharaclient>=1.4.0 # Apache-2.0
-python-senlinclient>=1.1.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
python-troveclient>=2.2.0 # Apache-2.0
python-zaqarclient>=1.0.0 # Apache-2.0
-python-zunclient>=0.2.0 # Apache-2.0
+python-zunclient>=1.0.0 # Apache-2.0
pytz>=2013.6 # MIT
-PyYAML>=3.10 # MIT
+PyYAML>=3.12 # MIT
requests>=2.14.2 # Apache-2.0
tenacity>=3.2.1 # Apache-2.0
Routes>=2.3.1 # MIT
diff --git a/setup.cfg b/setup.cfg
index bc656ac69..be6863e98 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,7 +22,6 @@ classifier =
data_files =
etc/heat =
etc/heat/api-paste.ini
- etc/heat/policy.json
etc/heat/environment.d = etc/heat/environment.d/*
etc/heat/templates = etc/heat/templates/*
packages =
@@ -38,14 +37,12 @@ console_scripts =
heat-all = heat.cmd.all:main
heat-api = heat.cmd.api:main
heat-api-cfn = heat.cmd.api_cfn:main
- heat-api-cloudwatch = heat.cmd.api_cloudwatch:main
heat-engine = heat.cmd.engine:main
heat-manage = heat.cmd.manage:main
wsgi_scripts =
heat-wsgi-api = heat.httpd.heat_api:init_application
heat-wsgi-api-cfn = heat.httpd.heat_api_cfn:init_application
- heat-wsgi-api-cloudwatch = heat.httpd.heat_api_cloudwatch:init_application
oslo.config.opts =
heat.common.config = heat.common.config:list_opts
@@ -56,7 +53,6 @@ oslo.config.opts =
heat.engine.clients = heat.engine.clients:list_opts
heat.engine.notification = heat.engine.notification:list_opts
heat.engine.resources = heat.engine.resources:list_opts
- heat.api.middleware.ssl = heat.api.middleware.ssl:list_opts
heat.api.aws.ec2token = heat.api.aws.ec2token:list_opts
heat_integrationtests.common.config = heat_integrationtests.common.config:list_opts
@@ -81,6 +77,7 @@ heat.clients =
monasca = heat.engine.clients.os.monasca:MonascaClientPlugin
nova = heat.engine.clients.os.nova:NovaClientPlugin
neutron = heat.engine.clients.os.neutron:NeutronClientPlugin
+ octavia = heat.engine.clients.os.octavia:OctaviaClientPlugin
openstack = heat.engine.clients.os.openstacksdk:OpenStackSDKPlugin
sahara = heat.engine.clients.os.sahara:SaharaClientPlugin
senlin = heat.engine.clients.os.senlin:SenlinClientPlugin
@@ -113,7 +110,6 @@ heat.constraints =
designate.domain = heat.engine.clients.os.designate:DesignateDomainConstraint
designate.zone = heat.engine.clients.os.designate:DesignateZoneConstraint
glance.image = heat.engine.clients.os.glance:ImageConstraint
- heat.resource_type = heat.engine.constraint.heat_constraints:ResourceTypeConstraint
keystone.domain = heat.engine.clients.os.keystone.keystone_constraints:KeystoneDomainConstraint
keystone.group = heat.engine.clients.os.keystone.keystone_constraints:KeystoneGroupConstraint
keystone.project = heat.engine.clients.os.keystone.keystone_constraints:KeystoneProjectConstraint
@@ -150,6 +146,10 @@ heat.constraints =
nova.keypair = heat.engine.clients.os.nova:KeypairConstraint
nova.network = heat.engine.constraint.common_constraints:TestConstraintDelay
nova.server = heat.engine.clients.os.nova:ServerConstraint
+ octavia.listener = heat.engine.clients.os.octavia:ListenerConstraint
+ octavia.loadbalancer = heat.engine.clients.os.octavia:LoadbalancerConstraint
+ octavia.l7policy = heat.engine.clients.os.octavia:L7PolicyConstraint
+ octavia.pool = heat.engine.clients.os.octavia:PoolConstraint
sahara.cluster = heat.engine.clients.os.sahara:ClusterConstraint
sahara.cluster_template = heat.engine.clients.os.sahara:ClusterTemplateConstraint
sahara.data_source = heat.engine.clients.os.sahara:DataSourceConstraint
@@ -187,9 +187,6 @@ heat.templates =
heat_template_version.2018-03-02 = heat.engine.hot.template:HOTemplate20180302
heat_template_version.queens = heat.engine.hot.template:HOTemplate20180302
-tempest.test_plugins =
- heat_tests = heat_integrationtests.plugin:HeatTempestPlugin
-
[global]
setup-hooks =
pbr.hooks.setup_hook
diff --git a/test-requirements.txt b/test-requirements.txt
index 443866dd9..16e09fd26 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,20 +11,17 @@ kombu!=4.0.2,>=4.0.0 # BSD
mock>=2.0.0 # BSD
mox3>=0.20.0 # Apache-2.0
PyMySQL>=0.7.6 # MIT License
-openstackdocstheme>=1.17.0 # Apache-2.0
+openstackdocstheme>=1.18.1 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
os-testr>=1.0.0 # Apache-2.0
-oslotest>=1.10.0 # Apache-2.0
+oslotest>=3.2.0 # Apache-2.0
qpid-python>=0.26;python_version=='2.7' # Apache-2.0
psycopg2>=2.6.2 # LGPL/ZPL
-sphinx>=1.6.2 # BSD
+sphinx!=1.6.6,>=1.6.2 # BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
reno>=2.5.0 # Apache-2.0
# Next are used in integration tests only
-os-collect-config>=5.0.0 # Apache-2.0
-paramiko>=2.0.0 # LGPLv2.1+
tempest>=17.1.0 # Apache-2.0
-gabbi>=1.35.0 # Apache-2.0
diff --git a/tools/README.rst b/tools/README.rst
index de4428aba..7bade8685 100644
--- a/tools/README.rst
+++ b/tools/README.rst
@@ -25,3 +25,18 @@ test-requires-deb
test-requires-rpm
list of RPM packages as of Fedora 20
+
+Review dashboards
+=================
+
+Generate a Gerrit review dashboard URL for heat. This can surface
+patches that require review. You can generate it with the following
+command in the `gerrit-dash-creator` repo
+( https://git.openstack.org/cgit/openstack/gerrit-dash-creator )
+
+ $ ./gerrit-dash-creator heat.dash
+
+A sample heat.dash can be found under ./dashboards/.
+
+Get the output URL and add it to your gerrit menu
+(at ``https://review.openstack.org/#/settings/preferences``).
diff --git a/tools/custom_guidelines.py b/tools/custom_guidelines.py
index 3d3627f1c..ce0defc80 100644
--- a/tools/custom_guidelines.py
+++ b/tools/custom_guidelines.py
@@ -167,12 +167,12 @@ class HeatCustomGuidelines(object):
while idx < len(lines):
if ('properties_schema' in lines[idx] or
'attributes_schema' in lines[idx]):
- level = len(re.findall('(\{|\()', lines[idx]))
- level -= len(re.findall('(\}|\))', lines[idx]))
+ level = len(re.findall(r'(\{|\()', lines[idx]))
+ level -= len(re.findall(r'(\}|\))', lines[idx]))
idx += 1
while level != 0:
- level += len(re.findall('(\{|\()', lines[idx]))
- level -= len(re.findall('(\}|\))', lines[idx]))
+ level += len(re.findall(r'(\{|\()', lines[idx]))
+ level -= len(re.findall(r'(\}|\))', lines[idx]))
if re.search("^((\'|\") )", lines[idx]):
kwargs.update(
{'details': 'line %s' % idx,
@@ -180,8 +180,8 @@ class HeatCustomGuidelines(object):
'be on previous line'),
'snippet': lines[idx]})
self.print_guideline_error(**kwargs)
- elif (re.search("(\S(\'|\"))$", lines[idx - 1]) and
- re.search("^((\'|\")\S)", lines[idx])):
+ elif (re.search("(\\S(\'|\"))$", lines[idx - 1]) and
+ re.search("^((\'|\")\\S)", lines[idx])):
kwargs.update(
{'details': 'line %s' % (idx - 1),
'message': _('Omitted whitespace at the '
@@ -205,7 +205,7 @@ class HeatCustomGuidelines(object):
'terminator at the end') % error_key.title(),
'snippet': description})
self.print_guideline_error(**error_kwargs)
- if re.search("\s{2,}", description):
+ if re.search(r"\s{2,}", description):
error_kwargs.update(
{'message': _('%s description contains double or more '
'whitespaces') % error_key.title(),
@@ -214,7 +214,7 @@ class HeatCustomGuidelines(object):
def _check_description_details(self, doclines, error_kwargs,
error_key):
- if re.search("\S", doclines[1]):
+ if re.search(r"\S", doclines[1]):
error_kwargs.update(
{'message': _('%s description summary and '
'main resource description should be '
@@ -240,7 +240,7 @@ class HeatCustomGuidelines(object):
params = False
for line in doclines[1:]:
- if re.search("\s{2,}", line):
+ if re.search(r"\s{2,}", line):
error_kwargs.update(
{'message': _('%s description '
'contains double or more '
diff --git a/tools/dashboards/heat.dash b/tools/dashboards/heat.dash
new file mode 100644
index 000000000..e41fc9b0f
--- /dev/null
+++ b/tools/dashboards/heat.dash
@@ -0,0 +1,33 @@
+[dashboard]
+title = Heat Review Inbox
+description = Review Inbox
+foreach = (project:openstack/heat OR project:openstack/heat-agents OR
+ project:openstack/heat-templates OR project:openstack/python-heatclient OR
+ project:openstack/heat-cfntools OR project:openstack/heat-specs OR
+ project:openstack/heat-dashboard OR project:openstack/heat-tempest-plugin)
+ status:open NOT owner:self NOT label:Workflow<=-1 label:Verified>=1
+ NOT label:Code-Review<=-1,self NOT label:Code-Review>=1,self
+
+[section "Heat Specs"]
+query = project:openstack/heat-specs
+
+[section "Bug Fixes"]
+query = topic:^bug/.*
+
+[section "Blueprints"]
+query = message:"Blueprint"
+
+[section "Needs Feedback (Changes older than 5 days that have not been reviewed by anyone)"]
+query = NOT label:Code-Review<=2 age:5d
+
+[section "You are a reviewer, but haven't voted in the current revision"]
+query = reviewer:self
+
+[section "Needs final +2"]
+query = label:Code-Review>=2 limit:50
+
+[section "New Contributors"]
+query = reviewer:10068
+
+[section "Passed Zuul, No Negative Feedback"]
+query = NOT label:Code-Review>=2 NOT label:Code-Review<=-1 limit:50
diff --git a/tox.ini b/tox.ini
index 64f9b7498..be0da0a0a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,14 +7,14 @@ skipsdist = True
setenv = VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_TEST_PATH=heat/tests
- TESTR_START_DIR=heat/tests
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete
- python setup.py testr --slowest --testr-args='{posargs}'
+ stestr run '{posargs}'
+ stestr slowest
whitelist_externals =
bash
@@ -24,11 +24,11 @@ passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
[testenv:py27log]
commands =
find . -type f -name "*.py[c|o]" -delete
- ostestr '^(?!heat_integrationtests){posargs}'
+ stestr run '^(?!heat_integrationtests){posargs}'
[testenv:pep8]
commands =
- flake8 heat bin/heat-api bin/heat-api-cfn bin/heat-api-cloudwatch bin/heat-engine bin/heat-manage contrib heat_integrationtests doc/source
+ flake8 heat bin/heat-api bin/heat-api-cfn bin/heat-engine bin/heat-manage contrib heat_integrationtests doc/source
python tools/custom_guidelines.py --exclude heat/engine/resources/aws
# The following bandit tests are being skipped:
# B101: Test for use of assert
@@ -49,8 +49,15 @@ commands =
commands = {posargs}
[testenv:cover]
+setenv =
+ PYTHON=coverage run --source heat --parallel-mode
commands =
- python setup.py testr --coverage --testr-args='^(?!heat_integrationtests){posargs}'
+ coverage erase
+ find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete
+ stestr run '{posargs}'
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
coverage report
[testenv:docs]
@@ -71,6 +78,7 @@ commands =
[testenv:genconfig]
commands =
oslo-config-generator --config-file=config-generator.conf
+ oslo-config-generator --config-file=heat_integrationtests/config-generator.conf
[testenv:genpolicy]
commands =