-rw-r--r--  .gitignore  1
-rwxr-xr-x  bin/heat-api  10
-rwxr-xr-x  bin/heat-api-cfn  6
-rwxr-xr-x  bin/heat-api-cloudwatch  7
-rwxr-xr-x  bin/heat-engine  6
-rwxr-xr-x  devstack/upgrade/resources.sh  97
-rw-r--r--  devstack/upgrade/settings  4
-rwxr-xr-x  devstack/upgrade/shutdown.sh  33
-rw-r--r--  devstack/upgrade/templates/random_string.yaml  4
-rwxr-xr-x  devstack/upgrade/upgrade.sh  88
-rw-r--r--  doc/source/ext/resources.py  91
-rw-r--r--  doc/source/glossary.rst  7
-rw-r--r--  doc/source/gmr.rst  73
-rw-r--r--  doc/source/index.rst  1
-rw-r--r--  doc/source/template_guide/basic_resources.rst  41
-rw-r--r--  doc/source/template_guide/composition.rst  2
-rw-r--r--  doc/source/template_guide/hot_spec.rst  41
-rw-r--r--  doc/source/template_guide/software_deployment.rst  42
-rw-r--r--  heat/api/openstack/v1/resources.py  31
-rw-r--r--  heat/api/openstack/v1/stacks.py  28
-rw-r--r--  heat/common/config.py  7
-rw-r--r--  heat/common/crypt.py  59
-rw-r--r--  heat/common/exception.py  15
-rw-r--r--  heat/common/wsgi.py  270
-rw-r--r--  heat/db/sqlalchemy/api.py  51
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/016_timeout_nullable.py  22
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/017_event_state_status.py  26
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/018_resource_id_uuid.py  25
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/019_resource_action_status.py  27
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/020_stack_action.py  24
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/021_resource_data.py  40
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py  21
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/023_raw_template_mysql_longtext.py  24
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py  21
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/025_user_creds_drop_service.py  23
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/026_user_creds_drop_aws.py  23
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/027_user_creds_trusts.py  29
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/028_havana.py (renamed from heat/db/sqlalchemy/migrate_repo/versions/015_grizzly.py)  56
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py  34
-rw-r--r--  heat/db/sqlalchemy/migrate_repo/versions/057_resource_uuid_to_id.py  5
-rw-r--r--  heat/db/sqlalchemy/migration.py  2
-rw-r--r--  heat/engine/attributes.py  12
-rw-r--r--  heat/engine/clients/__init__.py  2
-rw-r--r--  heat/engine/clients/client_plugin.py  12
-rw-r--r--  heat/engine/clients/os/designate.py  35
-rw-r--r--  heat/engine/clients/os/nova.py  5
-rw-r--r--  heat/engine/environment.py  28
-rw-r--r--  heat/engine/hot/functions.py  63
-rw-r--r--  heat/engine/hot/template.py  3
-rw-r--r--  heat/engine/parameters.py  6
-rw-r--r--  heat/engine/properties.py  3
-rw-r--r--  heat/engine/resource.py  101
-rw-r--r--  heat/engine/resources/aws/autoscaling/autoscaling_group.py  5
-rw-r--r--  heat/engine/resources/aws/autoscaling/scaling_policy.py  2
-rw-r--r--  heat/engine/resources/aws/cfn/wait_condition_handle.py  2
-rw-r--r--  heat/engine/resources/aws/ec2/instance.py  14
-rw-r--r--  heat/engine/resources/openstack/barbican/order.py  35
-rw-r--r--  heat/engine/resources/openstack/barbican/secret.py  6
-rw-r--r--  heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py  121
-rw-r--r--  heat/engine/resources/openstack/heat/ha_restarter.py  2
-rw-r--r--  heat/engine/resources/openstack/heat/scaling_policy.py  42
-rw-r--r--  heat/engine/resources/openstack/heat/software_deployment.py  4
-rw-r--r--  heat/engine/resources/openstack/manila/share_network.py  3
-rw-r--r--  heat/engine/resources/openstack/mistral/workflow.py  2
-rw-r--r--  heat/engine/resources/openstack/nova/server.py  14
-rw-r--r--  heat/engine/resources/signal_responder.py  16
-rw-r--r--  heat/engine/resources/template_resource.py  4
-rw-r--r--  heat/engine/service.py  9
-rwxr-xr-x  heat/engine/stack.py  39
-rw-r--r--  heat/engine/worker.py  20
-rw-r--r--  heat/locale/de/LC_MESSAGES/heat-log-error.po  63
-rw-r--r--  heat/locale/es/LC_MESSAGES/heat-log-error.po  32
-rw-r--r--  heat/locale/es/LC_MESSAGES/heat-log-info.po  56
-rw-r--r--  heat/locale/fr/LC_MESSAGES/heat-log-error.po  25
-rw-r--r--  heat/locale/fr/LC_MESSAGES/heat-log-info.po  48
-rw-r--r--  heat/locale/heat-log-error.pot  91
-rw-r--r--  heat/locale/heat-log-info.pot  191
-rw-r--r--  heat/locale/heat-log-warning.pot  91
-rw-r--r--  heat/locale/heat.pot  1022
-rw-r--r--  heat/locale/ko_KR/LC_MESSAGES/heat-log-error.po  17
-rw-r--r--  heat/locale/pt_BR/LC_MESSAGES/heat-log-error.po  19
-rw-r--r--  heat/objects/raw_template.py  11
-rwxr-xr-x  heat/objects/resource.py  9
-rw-r--r--  heat/rpc/api.py  4
-rw-r--r--  heat/rpc/client.py  14
-rw-r--r--  heat/scaling/cooldown.py  28
-rw-r--r--  heat/tests/api/__init__.py  0
-rw-r--r--  heat/tests/api/aws/__init__.py  0
-rw-r--r--  heat/tests/api/aws/test_api_aws.py (renamed from heat/tests/test_api_aws.py)  0
-rw-r--r--  heat/tests/api/aws/test_api_ec2token.py (renamed from heat/tests/test_api_ec2token.py)  0
-rw-r--r--  heat/tests/api/cfn/__init__.py  0
-rw-r--r--  heat/tests/api/cfn/test_api_cfn_v1.py (renamed from heat/tests/test_api_cfn_v1.py)  14
-rw-r--r--  heat/tests/api/cloudwatch/__init__.py  0
-rw-r--r--  heat/tests/api/cloudwatch/test_api_cloudwatch.py (renamed from heat/tests/test_api_cloudwatch.py)  50
-rw-r--r--  heat/tests/api/middleware/__init__.py  0
-rw-r--r--  heat/tests/api/middleware/test_ssl_middleware.py (renamed from heat/tests/test_ssl_middleware.py)  0
-rw-r--r--  heat/tests/api/middleware/test_version_negotiation_middleware.py (renamed from heat/tests/test_version_negotiation_middleware.py)  0
-rw-r--r--  heat/tests/api/openstack/__init__.py  0
-rw-r--r--  heat/tests/api/openstack/test_api_openstack_v1.py (renamed from heat/tests/test_api_openstack_v1.py)  209
-rw-r--r--  heat/tests/api/openstack/test_api_openstack_v1_util.py (renamed from heat/tests/test_api_openstack_v1_util.py)  0
-rw-r--r--  heat/tests/api/openstack/test_api_openstack_v1_views_stacks_view_builder.py (renamed from heat/tests/test_api_openstack_v1_views_stacks_view_builder.py)  0
-rw-r--r--  heat/tests/api/openstack/test_api_openstack_v1_views_views_common.py (renamed from heat/tests/test_api_openstack_v1_views_views_common.py)  0
-rw-r--r--  heat/tests/api/test_wsgi.py (renamed from heat/tests/test_wsgi.py)  81
-rw-r--r--  heat/tests/autoscaling/test_heat_scaling_group.py  2
-rw-r--r--  heat/tests/autoscaling/test_heat_scaling_policy.py  33
-rw-r--r--  heat/tests/autoscaling/test_scaling_group.py  2
-rw-r--r--  heat/tests/autoscaling/test_scaling_policy.py  27
-rw-r--r--  heat/tests/aws/test_instance.py  56
-rw-r--r--  heat/tests/aws/test_volume.py  2
-rw-r--r--  heat/tests/ceilometer/__init__.py  0
-rw-r--r--  heat/tests/ceilometer/test_ceilometer_alarm.py (renamed from heat/tests/test_ceilometer_alarm.py)  0
-rw-r--r--  heat/tests/ceilometer/test_gnocchi_alarm.py (renamed from heat/tests/test_gnocchi_alarm.py)  0
-rw-r--r--  heat/tests/cinder/__init__.py  0
-rw-r--r--  heat/tests/cinder/test_cinder_volume_type.py (renamed from heat/tests/test_cinder_volume_type.py)  0
-rw-r--r--  heat/tests/cinder/test_volume.py (renamed from heat/tests/openstack/test_volume.py)  2
-rw-r--r--  heat/tests/cinder/test_volume_type_encryption.py  117
-rw-r--r--  heat/tests/cinder/test_volume_utils.py (renamed from heat/tests/test_volume_utils.py)  0
-rw-r--r--  heat/tests/clients/__init__.py  0
-rw-r--r--  heat/tests/clients/test_barbican_client.py (renamed from heat/tests/test_barbican_client.py)  0
-rw-r--r--  heat/tests/clients/test_cinder_client.py (renamed from heat/tests/test_cinder_client.py)  0
-rw-r--r--  heat/tests/clients/test_clients.py (renamed from heat/tests/test_clients.py)  3
-rw-r--r--  heat/tests/clients/test_designate_client.py  156
-rw-r--r--  heat/tests/clients/test_glance_client.py (renamed from heat/tests/test_glance_client.py)  0
-rw-r--r--  heat/tests/clients/test_heat_client.py (renamed from heat/tests/test_heatclient.py)  16
-rw-r--r--  heat/tests/clients/test_keystone_client.py (renamed from heat/tests/keystone/test_client.py)  0
-rw-r--r--  heat/tests/clients/test_magnum_client.py (renamed from heat/tests/test_magnum_client.py)  0
-rw-r--r--  heat/tests/clients/test_manila_client.py (renamed from heat/tests/test_manila_client.py)  0
-rw-r--r--  heat/tests/clients/test_mistral_client.py (renamed from heat/tests/test_mistral_client.py)  0
-rw-r--r--  heat/tests/clients/test_neutron_client.py (renamed from heat/tests/neutron/test_neutron_client.py)  0
-rw-r--r--  heat/tests/clients/test_nova_client.py (renamed from heat/tests/test_nova_client.py)  0
-rw-r--r--  heat/tests/clients/test_sahara_client.py (renamed from heat/tests/test_sahara_client.py)  0
-rw-r--r--  heat/tests/clients/test_swift_client.py (renamed from heat/tests/test_swift_client.py)  0
-rw-r--r--  heat/tests/clients/test_zaqar_client.py (renamed from heat/tests/test_zaqar_client.py)  0
-rw-r--r--  heat/tests/common.py  6
-rw-r--r--  heat/tests/db/test_sqlalchemy_api.py  24
-rw-r--r--  heat/tests/engine/test_service_engine.py  4
-rw-r--r--  heat/tests/engine/test_software_config.py  2
-rw-r--r--  heat/tests/engine/test_stack_create.py  2
-rw-r--r--  heat/tests/engine/test_stack_events.py  238
-rw-r--r--  heat/tests/generic_resource.py  10
-rw-r--r--  heat/tests/manila/__init__.py  0
-rw-r--r--  heat/tests/manila/test_manila_security_service.py (renamed from heat/tests/test_manila_security_service.py)  0
-rw-r--r--  heat/tests/manila/test_manila_share.py (renamed from heat/tests/test_manila_share.py)  18
-rw-r--r--  heat/tests/manila/test_manila_share_type.py (renamed from heat/tests/test_manila_share_type.py)  0
-rw-r--r--  heat/tests/manila/test_share_network.py (renamed from heat/tests/test_share_network.py)  18
-rw-r--r--  heat/tests/mistral/__init__.py  0
-rw-r--r--  heat/tests/mistral/test_mistral_cron_trigger.py (renamed from heat/tests/test_mistral_cron_trigger.py)  10
-rw-r--r--  heat/tests/mistral/test_mistral_workflow.py (renamed from heat/tests/test_mistral_workflow.py)  32
-rw-r--r--  heat/tests/neutron/test_neutron.py  4
-rw-r--r--  heat/tests/nova/test_nova_floatingip.py (renamed from heat/tests/test_nova_floatingip.py)  0
-rw-r--r--  heat/tests/nova/test_nova_keypair.py (renamed from heat/tests/test_nova_keypair.py)  0
-rw-r--r--  heat/tests/nova/test_nova_servergroup.py (renamed from heat/tests/test_nova_servergroup.py)  0
-rw-r--r--  heat/tests/nova/test_server.py (renamed from heat/tests/test_server.py)  104
-rw-r--r--  heat/tests/openstack/designate/__init__.py  0
-rw-r--r--  heat/tests/test_attributes.py  12
-rw-r--r--  heat/tests/test_crypt.py  38
-rw-r--r--  heat/tests/test_engine_service.py  282
-rw-r--r--  heat/tests/test_engine_worker.py  45
-rw-r--r--  heat/tests/test_environment.py  24
-rw-r--r--  heat/tests/test_hot.py  45
-rw-r--r--  heat/tests/test_magnum_baymodel.py  13
-rw-r--r--  heat/tests/test_metadata_refresh.py  7
-rw-r--r--  heat/tests/test_parameters.py  7
-rw-r--r--  heat/tests/test_properties.py  31
-rw-r--r--  heat/tests/test_provider_template.py  7
-rw-r--r--  heat/tests/test_resource.py  222
-rw-r--r--  heat/tests/test_rpc_client.py  3
-rw-r--r--  heat/tests/test_software_deployment.py  10
-rw-r--r--  heat/tests/test_stack.py  2
-rw-r--r--  heat/tests/test_stack_delete.py  2
-rw-r--r--  heat_integrationtests/README.rst  2
-rw-r--r--  heat_integrationtests/functional/test_reload_on_sighup.py  98
-rw-r--r--  heat_integrationtests/functional/test_stack_tags.py  10
-rw-r--r--  heat_integrationtests/functional/test_validation.py  7
-rwxr-xr-x  heat_integrationtests/post_test_hook.sh  4
-rwxr-xr-x  heat_integrationtests/pre_test_hook.sh  6
-rwxr-xr-x  heat_integrationtests/prepare_test_network.sh  4
-rw-r--r--  heat_integrationtests/requirements.txt  1
-rwxr-xr-x  heat_upgradetests/post_test_hook.sh  0
-rwxr-xr-x  heat_upgradetests/pre_test_hook.sh  0
-rw-r--r--  requirements.txt  10
-rw-r--r--  setup.cfg  2
-rw-r--r--  tox.ini  1
183 files changed, 3871 insertions, 2014 deletions
diff --git a/.gitignore b/.gitignore
index b3044b5b3..7c00b6a00 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,4 @@ cover
.project
.pydevproject
doc/source/api/
+etc/heat/heat.conf.sample
diff --git a/bin/heat-api b/bin/heat-api
index 8ac4ab73d..62fcf9a93 100755
--- a/bin/heat-api
+++ b/bin/heat-api
@@ -33,6 +33,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
import six
@@ -50,8 +51,8 @@ LOG = logging.getLogger('heat.api')
if __name__ == '__main__':
try:
logging.register_options(cfg.CONF)
- version = version.version_info.version_string()
- cfg.CONF(project='heat', prog='heat-api', version=version)
+ cfg.CONF(project='heat', prog='heat-api',
+ version=version.version_info.version_string())
logging.setup(cfg.CONF, 'heat-api')
messaging.setup()
@@ -62,8 +63,9 @@ if __name__ == '__main__':
LOG.info(_LI('Starting Heat ReST API on %(host)s:%(port)s'),
{'host': host, 'port': port})
profiler.setup('heat-api', host)
- server = wsgi.Server()
- server.start(app, cfg.CONF.heat_api, default_port=port)
+ gmr.TextGuruMeditation.setup_autorun(version)
+ server = wsgi.Server('heat-api', cfg.CONF.heat_api)
+ server.start(app, default_port=port)
systemd.notify_once()
server.wait()
except RuntimeError as e:
diff --git a/bin/heat-api-cfn b/bin/heat-api-cfn
index 7fb577d13..9a6ddd283 100755
--- a/bin/heat-api-cfn
+++ b/bin/heat-api-cfn
@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
import six
@@ -66,8 +67,9 @@ if __name__ == '__main__':
LOG.info(_LI('Starting Heat API on %(host)s:%(port)s'),
{'host': host, 'port': port})
profiler.setup('heat-api-cfn', host)
- server = wsgi.Server()
- server.start(app, cfg.CONF.heat_api_cfn, default_port=port)
+ gmr.TextGuruMeditation.setup_autorun(version)
+ server = wsgi.Server('heat-api-cfn', cfg.CONF.heat_api_cfn)
+ server.start(app, default_port=port)
systemd.notify_once()
server.wait()
except RuntimeError as e:
diff --git a/bin/heat-api-cloudwatch b/bin/heat-api-cloudwatch
index b7850214e..1f50ca48d 100755
--- a/bin/heat-api-cloudwatch
+++ b/bin/heat-api-cloudwatch
@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'heat', '__init__.py')):
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
import six
@@ -66,8 +67,10 @@ if __name__ == '__main__':
LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'),
{'host': host, 'port': port})
profiler.setup('heat-api-cloudwatch', host)
- server = wsgi.Server()
- server.start(app, cfg.CONF.heat_api_cloudwatch, default_port=port)
+ gmr.TextGuruMeditation.setup_autorun(version)
+ server = wsgi.Server('heat-api-cloudwatch',
+ cfg.CONF.heat_api_cloudwatch)
+ server.start(app, default_port=port)
systemd.notify_once()
server.wait()
except RuntimeError as e:
diff --git a/bin/heat-engine b/bin/heat-engine
index cf3b1c417..315df217c 100755
--- a/bin/heat-engine
+++ b/bin/heat-engine
@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
+from oslo_reports import guru_meditation_report as gmr
from oslo_service import service
from heat.common import config
@@ -51,8 +52,8 @@ LOG = logging.getLogger('heat.engine')
if __name__ == '__main__':
logging.register_options(cfg.CONF)
- version = version.version_info.version_string()
- cfg.CONF(project='heat', prog='heat-engine', version=version)
+ cfg.CONF(project='heat', prog='heat-engine',
+ version=version.version_info.version_string())
logging.setup(cfg.CONF, 'heat-engine')
logging.set_defaults()
messaging.setup()
@@ -70,6 +71,7 @@ if __name__ == '__main__':
from heat.engine import service as engine # noqa
profiler.setup('heat-engine', cfg.CONF.host)
+ gmr.TextGuruMeditation.setup_autorun(version)
srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
launcher = service.launch(cfg.CONF, srv,
workers=cfg.CONF.num_engine_workers)
diff --git a/devstack/upgrade/resources.sh b/devstack/upgrade/resources.sh
new file mode 100755
index 000000000..2a0d1bf95
--- /dev/null
+++ b/devstack/upgrade/resources.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+source $GRENADE_DIR/grenaderc
+source $GRENADE_DIR/functions
+
+source $TOP_DIR/openrc admin admin
+
+set -o xtrace
+
+HEAT_USER=heat_grenade
+HEAT_PROJECT=heat_grenade
+HEAT_PASS=pass
+
+function _heat_set_user {
+ OS_TENANT_NAME=$HEAT_PROJECT
+ OS_USERNAME=$HEAT_USER
+ OS_PASSWORD=$HEAT_PASS
+}
+
+function create {
+ # creates a tenant for the server
+ eval $(openstack project create -f shell -c id $HEAT_PROJECT)
+ if [[ -z "$id" ]]; then
+ die $LINENO "Didn't create $HEAT_PROJECT project"
+ fi
+ resource_save heat project_id $id
+
+ # creates the user, and sets $id locally
+ eval $(openstack user create $HEAT_USER \
+ --project $id \
+ --password $HEAT_PASS \
+ -f shell -c id)
+ if [[ -z "$id" ]]; then
+ die $LINENO "Didn't create $HEAT_USER user"
+ fi
+ resource_save heat user_id $id
+ _heat_set_user
+
+ local stack_name='grenadine'
+ resource_save heat stack_name $stack_name
+ local loc=`dirname $BASH_SOURCE`
+ heat stack-create -f $loc/templates/random_string.yaml $stack_name
+}
+
+function verify {
+ _heat_set_user
+ stack_name=$(resource_get heat stack_name)
+ heat stack-show $stack_name
+ # TODO(sirushtim): Create more granular checks for Heat.
+}
+
+function verify_noapi {
+ # TODO(sirushtim): Write tests to validate liveness of the resources
+ # it creates during possible API downtime.
+ :
+}
+
+function destroy {
+ _heat_set_user
+ heat stack-delete $(resource_get heat stack_name)
+
+ source $TOP_DIR/openrc admin admin
+ local user_id=$(resource_get heat user_id)
+ local project_id=$(resource_get heat project_id)
+ openstack user delete $user_id
+ openstack project delete $project_id
+}
+
+# Dispatcher
+case $1 in
+ "create")
+ create
+ ;;
+ "verify_noapi")
+ verify_noapi
+ ;;
+ "verify")
+ verify
+ ;;
+ "destroy")
+ destroy
+ ;;
+esac
diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings
new file mode 100644
index 000000000..9d05220e1
--- /dev/null
+++ b/devstack/upgrade/settings
@@ -0,0 +1,4 @@
+register_project_for_upgrade heat
+register_db_to_save heat
+devstack_localrc base enable_service h-api h-api-cfn h-api-cw h-eng heat tempest ceilometer-alarm-evaluator ceilometer-alarm-notifier ceilometer-anotification
+devstack_localrc target enable_service h-api h-api-cfn h-api-cw h-eng heat tempest ceilometer-alarm-evaluator ceilometer-alarm-notifier ceilometer-anotification
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh
new file mode 100755
index 000000000..7e2f9ebb4
--- /dev/null
+++ b/devstack/upgrade/shutdown.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+source $GRENADE_DIR/grenaderc
+source $GRENADE_DIR/functions
+
+# We need base DevStack functions for this
+source $BASE_DEVSTACK_DIR/functions
+source $BASE_DEVSTACK_DIR/stackrc # needed for status directory
+source $BASE_DEVSTACK_DIR/lib/tls
+source $BASE_DEVSTACK_DIR/lib/heat
+
+set -o xtrace
+
+stop_heat
+
+SERVICES_DOWN="heat-api heat-engine heat-api-cfn heat-api-cloudwatch"
+
+# sanity check that services are actually down
+ensure_services_stopped $SERVICES_DOWN
diff --git a/devstack/upgrade/templates/random_string.yaml b/devstack/upgrade/templates/random_string.yaml
new file mode 100644
index 000000000..df1e5c3b0
--- /dev/null
+++ b/devstack/upgrade/templates/random_string.yaml
@@ -0,0 +1,4 @@
+heat_template_version: 2014-10-16
+resources:
+ random_string:
+ type: OS::Heat::RandomString
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
new file mode 100755
index 000000000..527512bde
--- /dev/null
+++ b/devstack/upgrade/upgrade.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# ``upgrade-heat``
+
+echo "*********************************************************************"
+echo "Begin $0"
+echo "*********************************************************************"
+
+# Clean up any resources that may be in use
+cleanup() {
+ set +o errexit
+
+ echo "*********************************************************************"
+ echo "ERROR: Abort $0"
+ echo "*********************************************************************"
+
+ # Kill ourselves to signal any calling process
+ trap 2; kill -2 $$
+}
+
+trap cleanup SIGHUP SIGINT SIGTERM
+
+# Keep track of the grenade directory
+RUN_DIR=$(cd $(dirname "$0") && pwd)
+
+# Source params
+source $GRENADE_DIR/grenaderc
+
+# Import common functions
+source $GRENADE_DIR/functions
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Upgrade Heat
+# ============
+
+# Duplicate some setup bits from target DevStack
+source $TARGET_DEVSTACK_DIR/functions
+source $TARGET_DEVSTACK_DIR/stackrc
+source $TARGET_DEVSTACK_DIR/lib/tls
+source $TARGET_DEVSTACK_DIR/lib/stack
+source $TARGET_DEVSTACK_DIR/lib/heat
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+# Save current config files for posterity
+[[ -d $SAVE_DIR/etc.heat ]] || cp -pr $HEAT_CONF_DIR $SAVE_DIR/etc.heat
+
+# install_heat()
+stack_install_service heat
+install_heatclient
+install_heat_other
+
+# calls upgrade-heat for specific release
+upgrade_project heat $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH
+
+# Simulate init_heat()
+create_heat_cache_dir
+
+HEAT_BIN_DIR=$(dirname $(which heat-manage))
+$HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync || die $LINENO "DB sync error"
+
+# Start Heat
+start_heat
+
+# Don't succeed unless the services come up
+ensure_services_started heat-api heat-engine heat-api-cloudwatch heat-api-cfn
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End $0"
+echo "*********************************************************************"
diff --git a/doc/source/ext/resources.py b/doc/source/ext/resources.py
index 99ad6669a..607caec7f 100644
--- a/doc/source/ext/resources.py
+++ b/doc/source/ext/resources.py
@@ -75,6 +75,9 @@ class ResourcePages(compat.Directive):
self.resource_class.properties_schema)
self.attrs_schemata = attributes.schemata(
self.resource_class.attributes_schema)
+ self.update_policy_schemata = properties.schemata(
+ self.resource_class.update_policy_schema)
+
self._status_str(resource_class.support_status, section)
@@ -86,10 +89,9 @@ class ResourcePages(compat.Directive):
self.contribute_properties(section)
self.contribute_attributes(section)
+ self.contribute_update_policy(section)
self.contribute_hot_syntax(section)
- self.contribute_yaml_syntax(section)
- self.contribute_json_syntax(section)
return content
@@ -171,61 +173,6 @@ resources:
block = nodes.literal_block('', template, language="hot")
section.append(block)
- def contribute_yaml_syntax(self, parent):
- section = self._section(parent, _('YAML Syntax'), '%s-yaml')
- props = []
- for prop_key in sorted(six.iterkeys(self.props_schemata)):
- prop = self.props_schemata[prop_key]
- if (prop.implemented
- and prop.support_status.status == support.SUPPORTED):
- props.append('%s: %s' % (prop_key,
- self._prop_syntax_example(prop)))
-
- props_str = ''
- if props:
- props_str = '''\n Properties:
- %s''' % ('\n '.join(props))
-
- template = '''HeatTemplateFormatVersion: '2012-12-12'
-...
-Resources:
- ...
- TheResource:
- Type: %s%s''' % (self.resource_type, props_str)
-
- block = nodes.literal_block('', template, language='yaml')
- section.append(block)
-
- def contribute_json_syntax(self, parent):
- section = self._section(parent, _('JSON Syntax'), '%s-json')
-
- props = []
- for prop_key in sorted(six.iterkeys(self.props_schemata)):
- prop = self.props_schemata[prop_key]
- if (prop.implemented
- and prop.support_status.status == support.SUPPORTED):
- props.append('"%s": %s' % (prop_key,
- self._prop_syntax_example(prop)))
-
- props_str = ''
- if props:
- props_str = ''',\n "Properties": {
- %s
- }''' % (',\n '.join(props))
-
- template = '''{
- "AWSTemplateFormatVersion" : "2010-09-09",
- ...
- "Resources" : {
- "TheResource": {
- "Type": "%s"%s
- }
- }
-}''' % (self.resource_type, props_str)
-
- block = nodes.literal_block('', template, language="json")
- section.append(block)
-
@staticmethod
def cmp_prop(x, y):
x_key, x_prop = x
@@ -343,6 +290,16 @@ Resources:
def_para = nodes.paragraph('', description)
definition.append(def_para)
+ def contribute_update_policy(self, parent):
+ if not self.update_policy_schemata:
+ return
+ section = self._section(parent, _('UpdatePolicy'), '%s-updpolicy')
+ prop_list = nodes.definition_list()
+ section.append(prop_list)
+ for prop_key, prop in sorted(self.update_policy_schemata.items(),
+ self.cmp_prop):
+ self.contribute_property(prop_list, prop_key, prop)
+
class IntegrateResourcePages(ResourcePages):
@@ -404,6 +361,24 @@ def _load_all_resources():
all_resources[name] = [cls]
+def link_resource(app, env, node, contnode):
+ reftarget = node.attributes['reftarget']
+ for resource_name in all_resources:
+ if resource_name.lower() == reftarget.lower():
+ resource = all_resources[resource_name]
+ refnode = nodes.reference('', '', internal=True)
+ refnode['reftitle'] = resource_name
+ if resource_name.startswith('AWS'):
+ source = 'template_guide/cfn'
+ else:
+ source = 'template_guide/openstack'
+ uri = app.builder.get_relative_uri(
+ node.attributes['refdoc'], source)
+ refnode['refuri'] = '%s#%s' % (uri, resource_name)
+ refnode.append(contnode)
+ return refnode
+
+
def setup(app):
_load_all_resources()
app.add_node(integratedrespages)
@@ -417,3 +392,5 @@ def setup(app):
app.add_node(contribresourcepages)
app.add_directive('contribrespages', ContribResourcePages)
+
+ app.connect('missing-reference', link_resource)
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index d6ec8b1eb..f94735ffd 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -76,10 +76,9 @@
Nova Instance metadata
User-provided *key:value* pairs associated with a Compute
- Instance. See `Instance specific data (OpenStack Compute Admin
- Guide)`_.
+ Instance. See `Instance specific data (OpenStack Operations Guide)`_.
- .. _Instance specific data (OpenStack Compute Admin Guide): http://docs.openstack.org/grizzly/openstack-compute/admin/content/instance-data.html#inserting_metadata
+ .. _Instance specific data (OpenStack Operations Guide): http://docs.openstack.org/openstack-ops/content/instances.html#instance_specific_data
OpenStack
Open source software for building private and public clouds.
@@ -169,7 +168,7 @@
configure instances at boot time. See also `User data (OpenStack
End User Guide)`_.
- .. _User data (OpenStack End User Guide): http://docs.openstack.org/user-guide/content/user-data.html#d6e2415
+ .. _User data (OpenStack End User Guide): http://docs.openstack.org/user-guide/cli_provide_user_data_to_instances.html
.. _cloud-init: https://help.ubuntu.com/community/CloudInit
Wait condition
diff --git a/doc/source/gmr.rst b/doc/source/gmr.rst
new file mode 100644
index 000000000..d2b164552
--- /dev/null
+++ b/doc/source/gmr.rst
@@ -0,0 +1,73 @@
+..
+ Copyright (c) 2014 OpenStack Foundation
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Guru Meditation Reports
+=======================
+
+Heat contains a mechanism whereby developers and system administrators can generate a report about the state of a running Heat executable. This report is called a *Guru Meditation Report* (*GMR* for short).
+
+Generating a GMR
+----------------
+
+A *GMR* can be generated by sending the *USR1* signal to any Heat process that supports it (see below). The *GMR* will then be output to standard error for that particular process.
+
+For example, suppose that ``heat-api`` has process id ``10172``, and was run with ``2>/var/log/heat/heat-api-err.log``. Then, ``kill -USR1 10172`` will trigger the Guru Meditation report to be printed to ``/var/log/heat/heat-api-err.log``.
+
+Structure of a GMR
+------------------
+
+The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections:
+
+Package
+ Shows information about the package to which this process belongs, including version information
+
+Threads
+ Shows stack traces and thread ids for each of the threads within this process
+
+Green Threads
+ Shows stack traces for each of the green threads within this process (green threads don't have thread ids)
+
+Configuration
+ Lists all the configuration options currently accessible via the CONF object for the current process
+
+Adding support for GMRs to a new executable
+--------------------------------------------
+
+Adding support for a *GMR* to a given executable is fairly easy.
+
+First, import the module (from the ``oslo.reports`` library), as well as the Heat version module:
+
+.. code-block:: python
+
+ from oslo_reports import guru_meditation_report as gmr
+ from heat import version
+
+Then, register any additional sections (optional):
+
+.. code-block:: python
+
+ gmr.TextGuruMeditation.register_section('Some Special Section',
+ some_section_generator)
+
+Finally (under main), before running the "main loop" of the executable (usually ``server.start()`` or something similar), register the *GMR* hook:
+
+.. code-block:: python
+
+ gmr.TextGuruMeditation.setup_autorun(version)
+
+Extending the GMR
+-----------------
+
+As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the documentation about oslo.reports: `oslo.reports <http://docs.openstack.org/developer/oslo.reports/>`_
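
For reference, a minimal standalone sketch of the autorun wiring described in the new ``gmr.rst`` above, mirroring what this change adds to the ``bin/heat-*`` scripts; the idle loop merely stands in for a real service's main loop:

.. code-block:: python

    import time

    from oslo_reports import guru_meditation_report as gmr

    from heat import version

    # Enable GMR dumps before entering the main loop; sending this process
    # the USR1 signal then prints a report to standard error.
    gmr.TextGuruMeditation.setup_autorun(version)

    while True:
        time.sleep(60)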
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f525aa50b..5073e0ffb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -54,6 +54,7 @@ Developers Documentation
architecture
pluginguide
schedulerhints
+ gmr
API Documentation
========================
diff --git a/doc/source/template_guide/basic_resources.rst b/doc/source/template_guide/basic_resources.rst
index 1812241f8..cc1d706cf 100644
--- a/doc/source/template_guide/basic_resources.rst
+++ b/doc/source/template_guide/basic_resources.rst
@@ -17,7 +17,7 @@ Manage instances
Create an instance
------------------
-Use the :hotref:`OS::Nova::Server` resource to create a Compute instance. The
+Use the :ref:`OS::Nova::Server` resource to create a Compute instance. The
``flavor`` property is the only mandatory one, but you need to define a boot
source using one of the ``image`` or ``block_device_mapping`` properties.
@@ -42,19 +42,19 @@ connecting to the ``private`` network:
Connect an instance to a network
--------------------------------
-Use the ``networks`` property of an :hotref:`OS::Nova::Server` resource to
+Use the ``networks`` property of an :ref:`OS::Nova::Server` resource to
define which networks an instance should connect to. Define each network as a
YAML map, containing one of the following keys:
``port``
The ID of an existing Networking port. You usually create this port in the
- same template using an :hotref:`OS::Neutron::Port` resource. You will be
+ same template using an :ref:`OS::Neutron::Port` resource. You will be
able to associate a floating IP to this port, and the port to your Compute
instance.
``network``
The name or ID of an existing network. You don't need to create an
- :hotref:`OS::Neutron::Port` resource if you use this property, but you will
+ :ref:`OS::Neutron::Port` resource if you use this property, but you will
not be able to associate a floating IP with the instance interface in the
template.
@@ -91,10 +91,10 @@ properties:
Create and associate security groups to an instance
---------------------------------------------------
-Use the :hotref:`OS::Neutron::SecurityGroup` resource to create security
+Use the :ref:`OS::Neutron::SecurityGroup` resource to create security
groups.
-Define the ``security_groups`` property of the :hotref:`OS::Neutron::Port`
+Define the ``security_groups`` property of the :ref:`OS::Neutron::Port`
resource to associate security groups to a port, then associate the port to an
instance.
@@ -145,8 +145,8 @@ instances.
OS::Nova resources
++++++++++++++++++
-Use the :hotref:`OS::Nova::FloatingIP` resource to create a floating IP, and
-the :hotref:`OS::Nova::FloatingIPAssociation` resource to associate the
+Use the :ref:`OS::Nova::FloatingIP` resource to create a floating IP, and
+the :ref:`OS::Nova::FloatingIPAssociation` resource to associate the
floating IP to an instance.
The following example creates an instance and a floating IP, and associate the
@@ -181,8 +181,8 @@ OS::Neutron resources
The Networking service (neutron) must be enabled on your OpenStack
deployment to use these resources.
-Use the :hotref:`OS::Neutron::FloatingIP` resource to create a floating IP, and
-the :hotref:`OS::Neutron::FloatingIPAssociation` resource to associate the
+Use the :ref:`OS::Neutron::FloatingIP` resource to create a floating IP, and
+the :ref:`OS::Neutron::FloatingIPAssociation` resource to associate the
floating IP to a port:
.. code-block:: yaml
@@ -242,7 +242,7 @@ with stack updates.
Enable remote access to an instance
-----------------------------------
-The ``key_name`` attribute of the :hotref:`OS::Nova::Server` resource defines
+The ``key_name`` attribute of the :ref:`OS::Nova::Server` resource defines
the key pair to use to enable SSH remote access:
.. code-block:: yaml
@@ -257,11 +257,12 @@ the key pair to use to enable SSH remote access:
key_name: my_key
.. note::
- For more information about key pairs, see the :doc:`../cli_nova_configure_access_security_for_instances`.
+ For more information about key pairs, see
+ `Configure access and security for instances <http://docs.openstack.org/user-guide/configure_access_and_security_for_instances.html>`_.
Create a key pair
-----------------
-You can create new key pairs with the :hotref:`OS::Nova::KeyPair` resource. Key
+You can create new key pairs with the :ref:`OS::Nova::KeyPair` resource. Key
pairs can be imported or created during the stack creation.
If the ``public_key`` property is not specified, the Orchestration module
@@ -301,8 +302,8 @@ Create a network and a subnet
deployment to create and manage networks and subnets. Networks and subnets
cannot be created if your deployment uses legacy networking (nova-network).
-Use the :hotref:`OS::Neutron::Net` resource to create a network, and the
-:hotref:`OS::Neutron::Subnet` resource to provide a subnet for this network:
+Use the :ref:`OS::Neutron::Net` resource to create a network, and the
+:ref:`OS::Neutron::Subnet` resource to provide a subnet for this network:
.. code-block:: yaml
:linenos:
@@ -322,7 +323,7 @@ Use the :hotref:`OS::Neutron::Net` resource to create a network, and the
Create and manage a router
--------------------------
-Use the :hotref:`OS::Neutron::Router` resource to create a router. You can
+Use the :ref:`OS::Neutron::Router` resource to create a router. You can
define its gateway with the ``external_gateway_info`` property:
.. code-block:: yaml
@@ -335,7 +336,7 @@ define its gateway with the ``external_gateway_info`` property:
external_gateway_info: { network: public }
You can connect subnets to routers with the
-:hotref:`OS::Neutron::RouterInterface` resource:
+:ref:`OS::Neutron::RouterInterface` resource:
.. code-block:: yaml
:linenos:
@@ -389,7 +390,7 @@ Manage volumes
~~~~~~~~~~~~~~
Create a volume
---------------
-Use the :hotref:`OS::Cinder::Volume` resource to create a new Block Storage
+Use the :ref:`OS::Cinder::Volume` resource to create a new Block Storage
volume.
For example:
@@ -437,7 +438,7 @@ service uses the size of the backup to define the size of the new volume.
Attach a volume to an instance
------------------------------
-Use the :hotref:`OS::Cinder::VolumeAttachment` resource to attach a volume to
+Use the :ref:`OS::Cinder::VolumeAttachment` resource to attach a volume to
an instance.
The following example creates a volume and an instance, and attaches the volume
@@ -466,7 +467,7 @@ to the instance:
Boot an instance from a volume
------------------------------
-Use the ``block_device_mapping`` property of the :hotref:`OS::Nova::Server`
+Use the ``block_device_mapping`` property of the :ref:`OS::Nova::Server`
resource to define a volume used to boot the instance. This property is a list
of volumes to attach to the instance before its boot.
diff --git a/doc/source/template_guide/composition.rst b/doc/source/template_guide/composition.rst
index 6c7970053..98e980888 100644
--- a/doc/source/template_guide/composition.rst
+++ b/doc/source/template_guide/composition.rst
@@ -26,7 +26,7 @@ together using template resources. This is a mechanism to define a resource
using a template, thus composing one logical stack with multiple templates.
Template resources provide a feature similar to the
-:hotref:`AWS::CloudFormation::Stack` resource, but also provide a way to:
+:ref:`AWS::CloudFormation::Stack` resource, but also provide a way to:
* Define new resource types and build your own resource library.
* Override the default behaviour of existing resource types.
diff --git a/doc/source/template_guide/hot_spec.rst b/doc/source/template_guide/hot_spec.rst
index 7a91a34dd..d326e5053 100644
--- a/doc/source/template_guide/hot_spec.rst
+++ b/doc/source/template_guide/hot_spec.rst
@@ -171,6 +171,7 @@ For example, Heat currently supports the following values for the
digest
resource_facade
str_replace
+ str_split
@@ -1046,3 +1047,43 @@ In the example above, one can imagine that MySQL is being configured on a
compute instance and the root password is going to be set based on a user
provided parameter. The script for doing this is provided as userdata to the
compute instance, leveraging the ``str_replace`` function.
+
+str_split
+---------
+The *str_split* function splits a string into a list of strings, using an
+arbitrary delimiter; it is the inverse of list_join.
+
+The syntax of the str_split function is as follows:
+
+::
+
+ str_split:
+ - ','
+ - string,to,split
+Or:
+
+::
+
+ str_split: [',', 'string,to,split']
+
+The result of which is:
+
+::
+
+ ['string', 'to', 'split']
+
+Optionally, an index may be provided to select a specific entry from the
+resulting list, similar to get_attr/get_param:
+
+::
+
+ str_split: [',', 'string,to,split', 0]
+
+The result of which is:
+
+::
+
+ 'string'
+
+Note: The index starts at zero, and any value beyond the maximum (that is,
+the length of the list minus one) will cause an error.
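
For illustration only, the behaviour described above is essentially Python's ``str.split`` plus an optional index lookup; a rough equivalent, not how Heat implements the function internally:

.. code-block:: python

    def str_split(delimiter, value, index=None):
        # Split into a list; an out-of-range index raises an error,
        # matching the note above.
        parts = value.split(delimiter)
        return parts if index is None else parts[index]

    print(str_split(',', 'string,to,split'))     # ['string', 'to', 'split']
    print(str_split(',', 'string,to,split', 0))  # 'string'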
diff --git a/doc/source/template_guide/software_deployment.rst b/doc/source/template_guide/software_deployment.rst
index a7f430f77..cf0f5b724 100644
--- a/doc/source/template_guide/software_deployment.rst
+++ b/doc/source/template_guide/software_deployment.rst
@@ -137,7 +137,7 @@ script in a separate file:
Choosing the user_data_format
-----------------------------
-The :hotref:`OS::Nova::Server` ``user_data_format`` property determines how the
+The :ref:`OS::Nova::Server` ``user_data_format`` property determines how the
``user_data`` should be formatted for the server. For the default value
``HEAT_CFNTOOLS``, the ``user_data`` is bundled as part of the heat-cfntools
cloud-init boot configuration data. While ``HEAT_CFNTOOLS`` is the default
@@ -181,10 +181,10 @@ Often it is necessary to pause further creation of stack resources until the
boot configuration script has notified that it has reached a certain state.
This is usually either to notify that a service is now active, or to pass out
some generated data which is needed by another resource. The resources
-:hotref:`OS::Heat::WaitCondition` and :hotref:`OS::Heat::SwiftSignal` both perform
+:ref:`OS::Heat::WaitCondition` and :ref:`OS::Heat::SwiftSignal` both perform
this function using different techniques and tradeoffs.
-:hotref:`OS::Heat::WaitCondition` is implemented as a call to the
+:ref:`OS::Heat::WaitCondition` is implemented as a call to the
`Orchestration API`_ resource signal. The token is created using credentials
for a user account which is scoped only to the wait condition handle
resource. This user is created when the handle is created, and is associated
@@ -289,7 +289,7 @@ which builds a curl command with a valid token:
..
-:hotref:`OS::Heat::SwiftSignal` is implemented by creating an Object Storage
+:ref:`OS::Heat::SwiftSignal` is implemented by creating an Object Storage
API temporary URL which is populated with signal data with an HTTP PUT. The
orchestration service will poll this object until the signal data is available.
Object versioning is used to store multiple signals.
@@ -317,13 +317,13 @@ swift signal resources:
type: OS::Heat::SwiftSignalHandle
# ...
-The decision to use :hotref:`OS::Heat::WaitCondition` or
-:hotref:`OS::Heat::SwiftSignal` will depend on a few factors:
+The decision to use :ref:`OS::Heat::WaitCondition` or
+:ref:`OS::Heat::SwiftSignal` will depend on a few factors:
-* :hotref:`OS::Heat::SwiftSignal` depends on the availability of an Object
+* :ref:`OS::Heat::SwiftSignal` depends on the availability of an Object
Storage API
-* :hotref:`OS::Heat::WaitCondition` depends on whether the orchestration
+* :ref:`OS::Heat::WaitCondition` depends on whether the orchestration
service has been configured with a dedicated stack domain (which may depend
on the availability of an Identity V3 API).
@@ -341,7 +341,7 @@ existing software-config resource, so a stack-update which changes any
existing software-config resource will result in API calls to create a new
config and delete the old one.
-The resource :hotref:`OS::Heat::SoftwareConfig` is used for storing configs
+The resource :ref:`OS::Heat::SoftwareConfig` is used for storing configs
represented by text scripts, for example:
.. code-block:: yaml
@@ -364,7 +364,7 @@ represented by text scripts, for example:
user_data_format: RAW
user_data: {get_resource: boot_script}
-The resource :hotref:`OS::Heat::CloudConfig` allows Cloud-init_ cloud-config to
+The resource :ref:`OS::Heat::CloudConfig` allows Cloud-init_ cloud-config to
be represented as template YAML rather than a block string. This allows
intrinsic functions to be included when building the cloud-config. This also
ensures that the cloud-config is valid YAML, although no further checks for
@@ -394,8 +394,8 @@ valid cloud-config are done.
user_data_format: RAW
user_data: {get_resource: boot_config}
-The resource :hotref:`OS::Heat::MultipartMime` allows multiple
-:hotref:`OS::Heat::SoftwareConfig` and :hotref:`OS::Heat::CloudConfig`
+The resource :ref:`OS::Heat::MultipartMime` allows multiple
+:ref:`OS::Heat::SoftwareConfig` and :ref:`OS::Heat::CloudConfig`
resources to be combined into a single Cloud-init_ multi-part message:
.. code-block:: yaml
@@ -448,18 +448,18 @@ Software deployment resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There are many situations where it is not desirable to replace the server
whenever there is a configuration change. The
-:hotref:`OS::Heat::SoftwareDeployment` resource allows any number of software
+:ref:`OS::Heat::SoftwareDeployment` resource allows any number of software
configurations to be added or removed from a server throughout its life-cycle.
Building custom image for software deployments
----------------------------------------------
-:hotref:`OS::Heat::SoftwareConfig` resources are used to store software
-configuration, and a :hotref:`OS::Heat::SoftwareDeployment` resource is used
+:ref:`OS::Heat::SoftwareConfig` resources are used to store software
+configuration, and a :ref:`OS::Heat::SoftwareDeployment` resource is used
to associate a config resource with one server. The ``group`` attribute on
-:hotref:`OS::Heat::SoftwareConfig` specifies what tool will consume the
+:ref:`OS::Heat::SoftwareConfig` specifies what tool will consume the
config content.
-:hotref:`OS::Heat::SoftwareConfig` has the ability to define a schema of
+:ref:`OS::Heat::SoftwareConfig` has the ability to define a schema of
``inputs`` and which the configuration script supports. Inputs are mapped to
whatever concept the configuration tool has for assigning
variables/parameters.
@@ -468,7 +468,7 @@ Likewise, ``outputs`` are mapped to the tool's capability to export structured
data after configuration execution. For tools which do not support this,
outputs can always be written to a known file path for the hook to read.
-The :hotref:`OS::Heat::SoftwareDeployment` resource allows values to be
+The :ref:`OS::Heat::SoftwareDeployment` resource allows values to be
assigned to the config inputs, and the resource remains in an ``IN_PROGRESS``
state until the server signals to heat what (if any) output values were
generated by the config script.
@@ -686,15 +686,15 @@ example:
There are a number of things to note about this template example:
-* :hotref:`OS::Heat::StructuredConfig` is like
- :hotref:`OS::Heat::SoftwareConfig` except that the ``config`` property
+* :ref:`OS::Heat::StructuredConfig` is like
+ :ref:`OS::Heat::SoftwareConfig` except that the ``config`` property
contains structured YAML instead of text script. This is useful for a
number of other configuration tools including ansible, salt and
os-apply-config.
* ``cfn-init`` has no concept of inputs, so ``{get_input: bar}`` acts as a
placeholder which gets replaced with the
- :hotref:`OS::Heat::StructuredDeployment` ``input_values`` value when the
+ :ref:`OS::Heat::StructuredDeployment` ``input_values`` value when the
deployment resource is created.
* ``cfn-init`` has no concept of outputs, so specifying
diff --git a/heat/api/openstack/v1/resources.py b/heat/api/openstack/v1/resources.py
index f1d13b7b2..b0de2cac5 100644
--- a/heat/api/openstack/v1/resources.py
+++ b/heat/api/openstack/v1/resources.py
@@ -79,23 +79,34 @@ class ResourceController(object):
self.options = options
self.rpc_client = rpc_client.EngineClient()
- @util.identified_stack
- def index(self, req, identity):
- """
- Lists summary information for all resources
- """
-
- nested_depth = 0
- key = rpc_api.PARAM_NESTED_DEPTH
+ def _extract_to_param(self, req, rpc_param, extractor, default):
+ key = rpc_param
if key in req.params:
try:
- nested_depth = param_utils.extract_int(key, req.params[key])
+ return extractor(key, req.params[key])
except ValueError as e:
raise exc.HTTPBadRequest(six.text_type(e))
+ else:
+ return default
+
+ @util.identified_stack
+ def index(self, req, identity):
+ """
+ Lists information for all resources
+ """
+ nested_depth = self._extract_to_param(req,
+ rpc_api.PARAM_NESTED_DEPTH,
+ param_utils.extract_int,
+ default=0)
+ with_detail = self._extract_to_param(req,
+ rpc_api.PARAM_WITH_DETAIL,
+ param_utils.extract_bool,
+ default=False)
res_list = self.rpc_client.list_stack_resources(req.context,
identity,
- nested_depth)
+ nested_depth,
+ with_detail)
return {'resources': [format_resource(req, res) for res in res_list]}
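
A minimal sketch of how a client might exercise the new resource-listing parameters, assuming ``PARAM_NESTED_DEPTH`` and ``PARAM_WITH_DETAIL`` map to query parameters named ``nested_depth`` and ``with_detail``; the endpoint, token and identifiers below are placeholders:

.. code-block:: python

    import requests

    # List resources two levels deep, returning full detail per resource.
    url = ('http://heat.example.com:8004/v1/TENANT_ID'
           '/stacks/STACK_NAME/STACK_ID/resources')
    resp = requests.get(url,
                        params={'nested_depth': 2, 'with_detail': True},
                        headers={'X-Auth-Token': 'TOKEN'})
    resp.raise_for_status()
    for res in resp.json()['resources']:
        print(res['resource_name'], res['resource_status'])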
diff --git a/heat/api/openstack/v1/stacks.py b/heat/api/openstack/v1/stacks.py
index 4f60a3336..5e99dcaa2 100644
--- a/heat/api/openstack/v1/stacks.py
+++ b/heat/api/openstack/v1/stacks.py
@@ -345,6 +345,16 @@ class StackController(object):
formatted_stack = stacks_view.format_stack(req, result)
return {'stack': formatted_stack}
+ def prepare_args(self, data):
+ args = data.args()
+ key = rpc_api.PARAM_TIMEOUT
+ if key in args:
+ args[key] = self._extract_int_param(key, args[key])
+ key = rpc_api.PARAM_TAGS
+ if args.get(key) is not None:
+ args[key] = self._extract_tags_param(args[key])
+ return args
+
@util.policy_enforce
def create(self, req, body):
"""
@@ -352,11 +362,7 @@ class StackController(object):
"""
data = InstantiationData(body)
- args = data.args()
- key = rpc_api.PARAM_TIMEOUT
- if key in args:
- args[key] = self._extract_int_param(key, args[key])
-
+ args = self.prepare_args(data)
result = self.rpc_client.create_stack(req.context,
data.stack_name(),
data.template(),
@@ -429,11 +435,7 @@ class StackController(object):
"""
data = InstantiationData(body)
- args = data.args()
- key = rpc_api.PARAM_TIMEOUT
- if key in args:
- args[key] = self._extract_int_param(key, args[key])
-
+ args = self.prepare_args(data)
self.rpc_client.update_stack(req.context,
identity,
data.template(),
@@ -451,11 +453,7 @@ class StackController(object):
"""
data = InstantiationData(body, patch=True)
- args = data.args()
- key = rpc_api.PARAM_TIMEOUT
- if key in args:
- args[key] = self._extract_int_param(key, args[key])
-
+ args = self.prepare_args(data)
self.rpc_client.update_stack(req.context,
identity,
data.template(),
diff --git a/heat/common/config.py b/heat/common/config.py
index ca85f9517..75b8791b9 100644
--- a/heat/common/config.py
+++ b/heat/common/config.py
@@ -326,9 +326,12 @@ def startup_sanity_check():
'"stack_domain_admin" and '
'"stack_domain_admin_password"'))
auth_key_len = len(cfg.CONF.auth_encryption_key)
- if auth_key_len not in [16, 24, 32]:
+ if auth_key_len in (16, 24):
+ LOG.warn(
+ _LW('Please update auth_encryption_key to be 32 characters.'))
+ elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
- 'length must be 16, 24 or 32'))
+ 'must be 32 characters'))
def list_opts():
diff --git a/heat/common/crypt.py b/heat/common/crypt.py
index 4fe904a6b..a73f32e12 100644
--- a/heat/common/crypt.py
+++ b/heat/common/crypt.py
@@ -12,56 +12,70 @@
# under the License.
import base64
+import sys
from Crypto.Cipher import AES
+from cryptography import fernet
from oslo_config import cfg
+from oslo_utils import encodeutils
from heat.common.i18n import _
from heat.openstack.common.crypto import utils
-
auth_opts = [
cfg.StrOpt('auth_encryption_key',
secret=True,
default='notgood but just long enough i t',
help=_('Key used to encrypt authentication info in the '
- 'database. Length of this key must be 16, 24 or 32 '
- 'characters.'))
+ 'database. Length of this key must be 32 characters.'))
]
cfg.CONF.register_opts(auth_opts)
-def encrypt(auth_info, encryption_key=None):
- if auth_info is None:
+def encrypt(value, encryption_key=None):
+ if value is None:
return None, None
-
- encryption_key = get_valid_encryption_key(encryption_key)
- sym = utils.SymmetricCrypto()
- res = sym.encrypt(encryption_key,
- auth_info, b64encode=True)
- return 'oslo_decrypt_v1', res
+ encryption_key = get_valid_encryption_key(encryption_key, fix_length=True)
+ sym = fernet.Fernet(encryption_key.encode('base64'))
+ res = sym.encrypt(encodeutils.safe_encode(value))
+ return 'cryptography_decrypt_v1', res
-def oslo_decrypt_v1(auth_info, encryption_key=None):
- if auth_info is None:
+def decrypt(method, data, encryption_key=None):
+ if method is None or data is None:
return None
+ decryptor = getattr(sys.modules[__name__], method)
+ value = decryptor(data, encryption_key)
+ if value is not None:
+ return encodeutils.safe_decode(value, 'utf-8')
+
+def oslo_decrypt_v1(value, encryption_key=None):
encryption_key = get_valid_encryption_key(encryption_key)
sym = utils.SymmetricCrypto()
return sym.decrypt(encryption_key,
- auth_info, b64decode=True)
+ value, b64decode=True)
+
+def cryptography_decrypt_v1(value, encryption_key=None):
+ encryption_key = get_valid_encryption_key(encryption_key, fix_length=True)
+ sym = fernet.Fernet(encryption_key.encode('base64'))
+ return sym.decrypt(encodeutils.safe_encode(value))
-def get_valid_encryption_key(encryption_key):
+
+def get_valid_encryption_key(encryption_key, fix_length=False):
if encryption_key is None:
- encryption_key = cfg.CONF.auth_encryption_key[:32]
- else:
- encryption_key = encryption_key[0:32]
- return encryption_key
+ encryption_key = cfg.CONF.auth_encryption_key
+
+ if fix_length and len(encryption_key) < 32:
+ # Backward compatible size
+ encryption_key = encryption_key * 2
+
+ return encryption_key[:32]
-def heat_decrypt(auth_info, encryption_key=None):
+def heat_decrypt(value, encryption_key=None):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
@@ -70,11 +84,8 @@ def heat_decrypt(auth_info, encryption_key=None):
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
- if auth_info is None:
- return None
-
encryption_key = get_valid_encryption_key(encryption_key)
- auth = base64.b64decode(auth_info)
+ auth = base64.b64decode(value)
iv = auth[:AES.block_size]
cipher = AES.new(encryption_key, AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
diff --git a/heat/common/exception.py b/heat/common/exception.py
index 5bb97d56a..8ca63f036 100644
--- a/heat/common/exception.py
+++ b/heat/common/exception.py
@@ -518,3 +518,18 @@ class ObjectFieldInvalid(HeatException):
class KeystoneServiceNameConflict(HeatException):
msg_fmt = _("Keystone has more than one service with same name "
"%(service)s. Please use service id instead of name")
+
+
+class SIGHUPInterrupt(HeatException):
+ msg_fmt = _("System SIGHUP signal received.")
+
+
+class StackResourceUnavailable(StackValidationFailed):
+ message = _("Service %(service_name)s does not have required endpoint in "
+ "service catalog for the resource %(resource_name)s")
+
+ def __init__(self, service_name, resource_name):
+ super(StackResourceUnavailable, self).__init__(
+ message=self.message % dict(
+ service_name=service_name,
+ resource_name=resource_name))
diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py
index 94691aa2d..cf9e3f3be 100644
--- a/heat/common/wsgi.py
+++ b/heat/common/wsgi.py
@@ -33,6 +33,8 @@ from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
+import functools
+from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
@@ -77,7 +79,7 @@ api_opts = [
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
- cfg.IntOpt('workers', default=0,
+ cfg.IntOpt('workers', default=processutils.get_worker_count(),
help=_("Number of workers for Heat service."),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
@@ -85,6 +87,10 @@ api_opts = [
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
+ cfg.IntOpt('tcp_keepidle', default=600,
+ help=_('The value for the socket option TCP_KEEPIDLE. This is '
+ 'the time in seconds that the connection must be idle '
+ 'before TCP starts sending keepalive probes.')),
]
api_group = cfg.OptGroup('heat_api')
cfg.CONF.register_group(api_group)
@@ -119,6 +125,10 @@ api_cfn_opts = [
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs).')),
+ cfg.IntOpt('tcp_keepidle', default=600,
+ help=_('The value for the socket option TCP_KEEPIDLE. This is '
+ 'the time in seconds that the connection must be idle '
+ 'before TCP starts sending keepalive probes.')),
]
api_cfn_group = cfg.OptGroup('heat_api_cfn')
cfg.CONF.register_group(api_cfn_group)
@@ -153,6 +163,10 @@ api_cw_opts = [
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs.)')),
+ cfg.IntOpt('tcp_keepidle', default=600,
+ help=_('The value for the socket option TCP_KEEPIDLE. This is '
+ 'the time in seconds that the connection must be idle '
+ 'before TCP starts sending keepalive probes.')),
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
@@ -227,11 +241,9 @@ def get_socket(conf, default_port):
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
- sock = eventlet.listen(bind_addr, backlog=conf.backlog,
+ sock = eventlet.listen(bind_addr,
+ backlog=conf.backlog,
family=address_family)
- if use_ssl:
- sock = ssl.wrap_socket(sock, certfile=cert_file,
- keyfile=key_file)
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
@@ -240,13 +252,6 @@ def get_socket(conf, default_port):
raise RuntimeError(_("Could not bind to %(bind_addr)s"
"after trying for 30 seconds")
% {'bind_addr': bind_addr})
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- # in my experience, sockets can hang around forever without keepalive
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-
- # This option isn't available in the OS X version of eventlet
- if hasattr(socket, 'TCP_KEEPIDLE'):
- sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
return sock
@@ -265,53 +270,64 @@ class WritableLogger(object):
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
- def __init__(self, threads=1000):
+ def __init__(self, name, conf, threads=1000):
+ os.umask(0o27) # ensure files are created with the correct privileges
+ self._logger = logging.getLogger("eventlet.wsgi.server")
+ self._wsgi_logger = WritableLogger(self._logger)
+ self.name = name
self.threads = threads
- self.children = []
+ self.children = set()
+ self.stale_children = set()
self.running = True
+ self.pgid = os.getpid()
+ self.conf = conf
+ try:
+ os.setpgid(self.pgid, self.pgid)
+ except OSError:
+ self.pgid = 0
+
+ def kill_children(self, *args):
+ """Kills the entire process group."""
+ LOG.error(_LE('SIGTERM received'))
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ self.running = False
+ os.killpg(0, signal.SIGTERM)
+
+ def hup(self, *args):
+ """
+        Reloads configuration files with zero downtime.
+ """
+ LOG.error(_LE('SIGHUP received'))
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ raise exception.SIGHUPInterrupt
- def start(self, application, conf, default_port):
+ def start(self, application, default_port):
"""
Run a WSGI server with the given application.
:param application: The application to run in the WSGI server
- :param conf: a cfg.ConfigOpts object
:param default_port: Port to bind to if none is specified in conf
"""
- def kill_children(*args):
- """Kills the entire process group."""
- LOG.error(_LE('SIGTERM received'))
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- self.running = False
- os.killpg(0, signal.SIGTERM)
-
- def hup(*args):
- """
- Shuts down the server(s), but allows running requests to complete
- """
- LOG.error(_LE('SIGHUP received'))
- signal.signal(signal.SIGHUP, signal.SIG_IGN)
- os.killpg(0, signal.SIGHUP)
- signal.signal(signal.SIGHUP, hup)
- eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
+ eventlet.wsgi.MAX_HEADER_LINE = self.conf.max_header_line
self.application = application
- self.sock = get_socket(conf, default_port)
-
- os.umask(0o27) # ensure files are created with the correct privileges
- self._logger = logging.getLogger("eventlet.wsgi.server")
- self._wsgi_logger = WritableLogger(self._logger)
+ self.default_port = default_port
+ self.configure_socket()
+ self.start_wsgi()
- if conf.workers == 0:
+ def start_wsgi(self):
+ if self.conf.workers == 0:
# Useful for profiling, test, debug etc.
self.pool = eventlet.GreenPool(size=self.threads)
- self.pool.spawn_n(self._single_run, application, self.sock)
+ self.pool.spawn_n(self._single_run, self.application, self.sock)
return
- LOG.info(_LI("Starting %d workers"), conf.workers)
- signal.signal(signal.SIGTERM, kill_children)
- signal.signal(signal.SIGHUP, hup)
- while len(self.children) < conf.workers:
+ LOG.info(_LI("Starting %d workers"), self.conf.workers)
+ signal.signal(signal.SIGTERM, self.kill_children)
+ signal.signal(signal.SIGINT, self.kill_children)
+ signal.signal(signal.SIGHUP, self.hup)
+ while len(self.children) < self.conf.workers:
self.run_child()
def wait_on_children(self):
@@ -319,9 +335,8 @@ class Server(object):
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
- LOG.error(_LE('Removing dead child %s'), pid)
- self.children.remove(pid)
- self.run_child()
+ self._remove_children(pid)
+ self._verify_and_respawn_children(pid, status)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
@@ -329,10 +344,151 @@ class Server(object):
LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
os.killpg(0, signal.SIGTERM)
break
+ except exception.SIGHUPInterrupt:
+ self.reload()
+ continue
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
LOG.debug('Exited')
+ def configure_socket(self, old_conf=None, has_changed=None):
+ """
+ Ensure a socket exists and is appropriately configured.
+
+ This function is called on start up, and can also be
+ called in the event of a configuration reload.
+
+ When called for the first time a new socket is created.
+        If reloading and either bind_host or bind_port has been
+        changed, the existing socket must be closed and a new
+        socket opened (laws of physics).
+
+ In all other cases (bind_host/bind_port have not changed)
+ the existing socket is reused.
+
+ :param old_conf: Cached old configuration settings (if any)
+        :param has_changed: callable to determine if a parameter has changed
+ """
+ # Do we need a fresh socket?
+ new_sock = (old_conf is None or (
+ has_changed('bind_host') or
+ has_changed('bind_port')))
+ # Will we be using https?
+ use_ssl = not (not self.conf.cert_file or not self.conf.key_file)
+ # Were we using https before?
+ old_use_ssl = (old_conf is not None and not (
+ not old_conf.get('key_file') or
+ not old_conf.get('cert_file')))
+ # Do we now need to perform an SSL wrap on the socket?
+ wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
+ # Do we now need to perform an SSL unwrap on the socket?
+ unwrap_sock = use_ssl is False and old_use_ssl is True
+
+ if new_sock:
+ self._sock = None
+ if old_conf is not None:
+ self.sock.close()
+ _sock = get_socket(self.conf, self.default_port)
+ _sock.setsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR, 1)
+ # sockets can hang around forever without keepalive
+ _sock.setsockopt(socket.SOL_SOCKET,
+ socket.SO_KEEPALIVE, 1)
+ self._sock = _sock
+
+ if wrap_sock:
+ self.sock = ssl.wrap_socket(self._sock,
+ certfile=self.conf.cert_file,
+ keyfile=self.conf.key_file)
+
+ if unwrap_sock:
+ self.sock = self._sock
+
+ if new_sock and not use_ssl:
+ self.sock = self._sock
+
+ # Pick up newly deployed certs
+ if old_conf is not None and use_ssl is True and old_use_ssl is True:
+ if has_changed('cert_file'):
+ self.sock.certfile = self.conf.cert_file
+ if has_changed('key_file'):
+ self.sock.keyfile = self.conf.key_file
+
+ if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
+ # This option isn't available in the OS X version of eventlet
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
+ self.conf.tcp_keepidle)
+
+ if old_conf is not None and has_changed('backlog'):
+ self.sock.listen(self.conf.backlog)
+
+ def _remove_children(self, pid):
+ if pid in self.children:
+ self.children.remove(pid)
+ LOG.info(_LI('Removed dead child %s'), pid)
+ elif pid in self.stale_children:
+ self.stale_children.remove(pid)
+ LOG.info(_LI('Removed stale child %s'), pid)
+ else:
+ LOG.warn(_LW('Unrecognised child %s'), pid)
+
+ def _verify_and_respawn_children(self, pid, status):
+ if len(self.stale_children) == 0:
+ LOG.debug('No stale children')
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
+ LOG.error(_LE('Not respawning child %d, cannot '
+ 'recover from termination'), pid)
+ if not self.children and not self.stale_children:
+ LOG.info(
+ _LI('All workers have terminated. Exiting'))
+ self.running = False
+ else:
+ if len(self.children) < self.conf.workers:
+ self.run_child()
+
+ def stash_conf_values(self):
+ """
+ Make a copy of some of the current global CONF's settings.
+ Allows determining if any of these values have changed
+ when the config is reloaded.
+ """
+ conf = {}
+ conf['bind_host'] = self.conf.bind_host
+ conf['bind_port'] = self.conf.bind_port
+ conf['backlog'] = self.conf.backlog
+ conf['key_file'] = self.conf.key_file
+ conf['cert_file'] = self.conf.cert_file
+ return conf
+
+ def reload(self):
+ """
+ Reload and re-apply configuration settings
+
+ Existing child processes are sent a SIGHUP signal
+ and will exit after completing existing requests.
+ New child processes, which will have the updated
+        configuration, are spawned. This avoids any
+        interruption to the service.
+ """
+ def _has_changed(old, new, param):
+ old = old.get(param)
+ new = getattr(new, param)
+ return (new != old)
+
+ old_conf = self.stash_conf_values()
+ has_changed = functools.partial(_has_changed, old_conf, self.conf)
+ cfg.CONF.reload_config_files()
+ os.killpg(self.pgid, signal.SIGHUP)
+ self.stale_children = self.children
+ self.children = set()
+
+ # Ensure any logging config changes are picked up
+ logging.setup(cfg.CONF, self.name)
+
+ self.configure_socket(old_conf, has_changed)
+ self.start_wsgi()
+
def wait(self):
"""Wait until all servers have completed running."""
try:
@@ -344,16 +500,32 @@ class Server(object):
pass
def run_child(self):
+ def child_hup(*args):
+ """Shuts down child processes, existing requests are handled."""
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ eventlet.wsgi.is_accepting = False
+ self.sock.close()
+
pid = os.fork()
if pid == 0:
- signal.signal(signal.SIGHUP, signal.SIG_DFL)
+ signal.signal(signal.SIGHUP, child_hup)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ # ignore the interrupt signal to avoid a race whereby
+ # a child worker receives the signal before the parent
+ # and is respawned unnecessarily as a result
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ # The child has no need to stash the unwrapped
+ # socket, and the reference prevents a clean
+ # exit on sighup
+ self._sock = None
self.run_server()
LOG.info(_LI('Child %d exiting normally'), os.getpid())
- return
+ # self.pool.waitall() is now called in wsgi's server so
+ # it's safe to exit here
+ sys.exit(0)
else:
LOG.info(_LI('Started child %s'), pid)
- self.children.append(pid)
+ self.children.add(pid)
def run_server(self):
"""Run a WSGI server."""
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index 5a574c35f..2a2b0d732 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -18,7 +18,6 @@ import sys
from oslo_config import cfg
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils
-from oslo_utils import encodeutils
from oslo_utils import timeutils
import osprofiler.sqlalchemy
import six
@@ -193,7 +192,7 @@ def resource_data_get_all(resource, data=None):
for res in data:
if res.redact:
- ret[res.key] = _decrypt(res.value, res.decrypt_method)
+ ret[res.key] = crypt.decrypt(res.decrypt_method, res.value)
else:
ret[res.key] = res.value
return ret
@@ -207,7 +206,7 @@ def resource_data_get(resource, key):
resource.id,
key)
if result.redact:
- return _decrypt(result.value, result.decrypt_method)
+ return crypt.decrypt(result.decrypt_method, result.value)
return result.value
@@ -241,22 +240,6 @@ def stack_tags_get(context, stack_id):
return result or None
-def _encrypt(value):
- if value is not None:
- return crypt.encrypt(value.encode('utf-8'))
- else:
- return None, None
-
-
-def _decrypt(enc_value, method):
- if method is None:
- return None
- decryptor = getattr(crypt, method)
- value = decryptor(enc_value)
- if value is not None:
- return six.text_type(value, 'utf-8')
-
-
def resource_data_get_by_key(context, resource_id, key):
"""Looks up resource_data by resource_id and key. Does not unencrypt
resource_data.
@@ -273,7 +256,7 @@ def resource_data_get_by_key(context, resource_id, key):
def resource_data_set(resource, key, value, redact=False):
"""Save resource's key/value pair to database."""
if redact:
- method, value = _encrypt(value)
+ method, value = crypt.encrypt(value)
else:
method = ''
try:
@@ -619,7 +602,7 @@ def user_creds_create(context):
values = context.to_dict()
user_creds_ref = models.UserCreds()
if values.get('trust_id'):
- method, trust_id = _encrypt(values.get('trust_id'))
+ method, trust_id = crypt.encrypt(values.get('trust_id'))
user_creds_ref.trust_id = trust_id
user_creds_ref.decrypt_method = method
user_creds_ref.trustor_user_id = values.get('trustor_user_id')
@@ -631,7 +614,7 @@ def user_creds_create(context):
user_creds_ref.region_name = values.get('region_name')
else:
user_creds_ref.update(values)
- method, password = _encrypt(values['password'])
+ method, password = crypt.encrypt(values['password'])
if len(six.text_type(password)) > 255:
raise exception.Error(_("Length of OS_PASSWORD after encryption"
" exceeds Heat limit (255 chars)"))
@@ -649,8 +632,10 @@ def user_creds_get(user_creds_id):
# or it can be committed back to the DB in decrypted form
result = dict(db_result)
del result['decrypt_method']
- result['password'] = _decrypt(result['password'], db_result.decrypt_method)
- result['trust_id'] = _decrypt(result['trust_id'], db_result.decrypt_method)
+ result['password'] = crypt.decrypt(
+ db_result.decrypt_method, result['password'])
+ result['trust_id'] = crypt.decrypt(
+ db_result.decrypt_method, result['trust_id'])
return result
@@ -1155,8 +1140,7 @@ def db_encrypt_parameters_and_properties(ctxt, encryption_key):
except KeyError:
param_val = param.default
- encoded_val = encodeutils.safe_encode(param_val)
- encrypted_val = crypt.encrypt(encoded_val, encryption_key)
+ encrypted_val = crypt.encrypt(param_val, encryption_key)
raw_template.environment['parameters'][param_name] = \
encrypted_val
encrypted_params.append(param_name)
@@ -1179,17 +1163,10 @@ def db_decrypt_parameters_and_properties(ctxt, encryption_key):
encrypted_params = raw_template.environment[
'encrypted_param_names']
for param_name in encrypted_params:
- decrypt_function_name = parameters[param_name][0]
- decrypt_function = getattr(crypt, decrypt_function_name)
- decrypted_val = decrypt_function(parameters[param_name][1],
- encryption_key)
- try:
- parameters[param_name] = encodeutils.safe_decode(
- decrypted_val)
- except UnicodeDecodeError:
- # if the incorrect encryption_key was used then we can get
- # total gibberish here and safe_decode() will freak out.
- parameters[param_name] = decrypted_val
+ method, value = parameters[param_name]
+ decrypted_val = crypt.decrypt(method, value, encryption_key)
+ parameters[param_name] = decrypted_val
+
environment = raw_template.environment.copy()
environment['encrypted_param_names'] = []
raw_template_update(ctxt, raw_template.id,
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/016_timeout_nullable.py b/heat/db/sqlalchemy/migrate_repo/versions/016_timeout_nullable.py
deleted file mode 100644
index 3f136f7aa..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/016_timeout_nullable.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- stack = sqlalchemy.Table('stack', meta, autoload=True)
- stack.c.timeout.alter(nullable=True)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/017_event_state_status.py b/heat/db/sqlalchemy/migrate_repo/versions/017_event_state_status.py
deleted file mode 100644
index 8431acf48..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/017_event_state_status.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- event = sqlalchemy.Table('event', meta, autoload=True)
- # Currently there is a 'name' column which really holds the
- # resource status, so rename it and add a separate action column
- # action is e.g "CREATE" and status is e.g "IN_PROGRESS"
- event.c.name.alter(name='resource_status')
- sqlalchemy.Column('resource_action', sqlalchemy.String(255)).create(event)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/018_resource_id_uuid.py b/heat/db/sqlalchemy/migrate_repo/versions/018_resource_id_uuid.py
deleted file mode 100644
index 12b531e16..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/018_resource_id_uuid.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- resource = sqlalchemy.Table('resource', meta, autoload=True)
-
- resource.c.id.alter(sqlalchemy.String(36), primary_key=True,
- default=lambda: str(uuid.uuid4()))
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/019_resource_action_status.py b/heat/db/sqlalchemy/migrate_repo/versions/019_resource_action_status.py
deleted file mode 100644
index 3ad176106..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/019_resource_action_status.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- resource = sqlalchemy.Table('resource', meta, autoload=True)
- # Align the current state/state_description with the
- # action/status now used in the event table
- action = sqlalchemy.Column('action',
- sqlalchemy.String(length=255))
- action.create(resource)
- resource.c.state.alter(name='status')
- resource.c.state_description.alter(name='status_reason')
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/020_stack_action.py b/heat/db/sqlalchemy/migrate_repo/versions/020_stack_action.py
deleted file mode 100644
index 8f9a7e633..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/020_stack_action.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- stack = sqlalchemy.Table('stack', meta, autoload=True)
- # Align with action/status now used in the event/resource tables
- action = sqlalchemy.Column('action',
- sqlalchemy.String(length=255))
- action.create(stack)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/021_resource_data.py b/heat/db/sqlalchemy/migrate_repo/versions/021_resource_data.py
deleted file mode 100644
index 6c6b9046f..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/021_resource_data.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- resource_data = sqlalchemy.Table(
- 'resource_data', meta,
- sqlalchemy.Column('id',
- sqlalchemy.Integer,
- primary_key=True,
- nullable=False),
- sqlalchemy.Column('created_at', sqlalchemy.DateTime),
- sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- sqlalchemy.Column('key', sqlalchemy.String(255)),
- sqlalchemy.Column('value', sqlalchemy.Text),
- sqlalchemy.Column('redact', sqlalchemy.Boolean),
- sqlalchemy.Column('resource_id',
- sqlalchemy.String(36),
- sqlalchemy.ForeignKey('resource.id'),
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
- sqlalchemy.Table('resource', meta, autoload=True)
- resource_data.create()
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py b/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py
deleted file mode 100644
index 6cbec067a..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/022_stack_event_soft_delete.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- stack = sqlalchemy.Table('stack', meta, autoload=True)
- sqlalchemy.Column('deleted_at', sqlalchemy.DateTime).create(stack)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/023_raw_template_mysql_longtext.py b/heat/db/sqlalchemy/migrate_repo/versions/023_raw_template_mysql_longtext.py
deleted file mode 100644
index ed73262c0..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/023_raw_template_mysql_longtext.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-from sqlalchemy.dialects import mysql
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name != 'mysql':
- return
-
- meta = sqlalchemy.MetaData(bind=migrate_engine)
- raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
- raw_template.c.template.alter(type=mysql.LONGTEXT())
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py b/heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py
deleted file mode 100644
index 6386dfd87..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/024_event_resource_name.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
- event = sqlalchemy.Table('event', meta, autoload=True)
- event.c.logical_resource_id.alter(name='resource_name')
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/025_user_creds_drop_service.py b/heat/db/sqlalchemy/migrate_repo/versions/025_user_creds_drop_service.py
deleted file mode 100644
index 01b9d56d1..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/025_user_creds_drop_service.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
-
- user_creds.c.service_user.drop()
- user_creds.c.service_password.drop()
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/026_user_creds_drop_aws.py b/heat/db/sqlalchemy/migrate_repo/versions/026_user_creds_drop_aws.py
deleted file mode 100644
index 799c95098..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/026_user_creds_drop_aws.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
-
- user_creds.c.aws_creds.drop()
- user_creds.c.aws_auth_url.drop()
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/027_user_creds_trusts.py b/heat/db/sqlalchemy/migrate_repo/versions/027_user_creds_trusts.py
deleted file mode 100644
index 73607f301..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/027_user_creds_trusts.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
-
- # keystone IDs are 32 characters long, but the keystone DB schema
- # specifies varchar(64) so align with that here, for the trust_id
- # we encrypt it, so align with the 255 chars allowed for password
- trustor_user_id = sqlalchemy.Column('trustor_user_id',
- sqlalchemy.String(length=64))
- trust_id = sqlalchemy.Column('trust_id', sqlalchemy.String(length=255))
- trustor_user_id.create(user_creds)
- trust_id.create(user_creds)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/015_grizzly.py b/heat/db/sqlalchemy/migrate_repo/versions/028_havana.py
index 9de16555b..b0989021d 100644
--- a/heat/db/sqlalchemy/migrate_repo/versions/015_grizzly.py
+++ b/heat/db/sqlalchemy/migrate_repo/versions/028_havana.py
@@ -11,8 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
import sqlalchemy
+from heat.db.sqlalchemy import types
+
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
@@ -24,7 +28,7 @@ def upgrade(migrate_engine):
nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- sqlalchemy.Column('template', sqlalchemy.Text),
+ sqlalchemy.Column('template', types.LongText),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -37,13 +41,11 @@ def upgrade(migrate_engine):
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
sqlalchemy.Column('username', sqlalchemy.String(255)),
sqlalchemy.Column('password', sqlalchemy.String(255)),
- sqlalchemy.Column('service_user', sqlalchemy.String(255)),
- sqlalchemy.Column('service_password', sqlalchemy.String(255)),
sqlalchemy.Column('tenant', sqlalchemy.String(1024)),
sqlalchemy.Column('auth_url', sqlalchemy.Text),
- sqlalchemy.Column('aws_auth_url', sqlalchemy.Text),
sqlalchemy.Column('tenant_id', sqlalchemy.String(256)),
- sqlalchemy.Column('aws_creds', sqlalchemy.Text),
+ sqlalchemy.Column('trust_id', sqlalchemy.String(255)),
+ sqlalchemy.Column('trustor_user_id', sqlalchemy.String(64)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -54,6 +56,7 @@ def upgrade(migrate_engine):
primary_key=True, nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
+ sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
sqlalchemy.Column('name', sqlalchemy.String(255)),
sqlalchemy.Column('raw_template_id',
sqlalchemy.Integer,
@@ -64,10 +67,11 @@ def upgrade(migrate_engine):
nullable=False),
sqlalchemy.Column('username', sqlalchemy.String(256)),
sqlalchemy.Column('owner_id', sqlalchemy.String(36)),
+ sqlalchemy.Column('action', sqlalchemy.String(255)),
sqlalchemy.Column('status', sqlalchemy.String(255)),
sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
- sqlalchemy.Column('parameters', sqlalchemy.Text),
- sqlalchemy.Column('timeout', sqlalchemy.Integer, nullable=False),
+ sqlalchemy.Column('parameters', types.LongText),
+ sqlalchemy.Column('timeout', sqlalchemy.Integer),
sqlalchemy.Column('tenant', sqlalchemy.String(256)),
sqlalchemy.Column('disable_rollback', sqlalchemy.Boolean,
nullable=False),
@@ -77,17 +81,35 @@ def upgrade(migrate_engine):
resource = sqlalchemy.Table(
'resource', meta,
- sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
- nullable=False),
+ sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
+ default=lambda: str(uuid.uuid4())),
sqlalchemy.Column('nova_instance', sqlalchemy.String(255)),
sqlalchemy.Column('name', sqlalchemy.String(255)),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- sqlalchemy.Column('state', sqlalchemy.String(255)),
- sqlalchemy.Column('state_description', sqlalchemy.String(255)),
+ sqlalchemy.Column('action', sqlalchemy.String(255)),
+ sqlalchemy.Column('status', sqlalchemy.String(255)),
+ sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
sqlalchemy.Column('stack_id', sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'), nullable=False),
- sqlalchemy.Column('rsrc_metadata', sqlalchemy.Text),
+ sqlalchemy.Column('rsrc_metadata', types.LongText),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ resource_data = sqlalchemy.Table(
+ 'resource_data', meta,
+ sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
+ nullable=False),
+ sqlalchemy.Column('created_at', sqlalchemy.DateTime),
+ sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
+ sqlalchemy.Column('key', sqlalchemy.String(255)),
+ sqlalchemy.Column('value', sqlalchemy.Text),
+ sqlalchemy.Column('redact', sqlalchemy.Boolean),
+ sqlalchemy.Column('resource_id',
+ sqlalchemy.String(36),
+ sqlalchemy.ForeignKey('resource.id'),
+ nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -100,8 +122,9 @@ def upgrade(migrate_engine):
sqlalchemy.ForeignKey('stack.id'), nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- sqlalchemy.Column('name', sqlalchemy.String(255)),
- sqlalchemy.Column('logical_resource_id', sqlalchemy.String(255)),
+ sqlalchemy.Column('resource_action', sqlalchemy.String(255)),
+ sqlalchemy.Column('resource_status', sqlalchemy.String(255)),
+ sqlalchemy.Column('resource_name', sqlalchemy.String(255)),
sqlalchemy.Column('physical_resource_id', sqlalchemy.String(255)),
sqlalchemy.Column('resource_status_reason', sqlalchemy.String(255)),
sqlalchemy.Column('resource_type', sqlalchemy.String(255)),
@@ -118,7 +141,7 @@ def upgrade(migrate_engine):
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
sqlalchemy.Column('name', sqlalchemy.String(255)),
sqlalchemy.Column('state', sqlalchemy.String(255)),
- sqlalchemy.Column('rule', sqlalchemy.Text),
+ sqlalchemy.Column('rule', types.LongText),
sqlalchemy.Column('last_evaluated', sqlalchemy.DateTime),
sqlalchemy.Column('stack_id', sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'), nullable=False),
@@ -132,7 +155,7 @@ def upgrade(migrate_engine):
nullable=False),
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- sqlalchemy.Column('data', sqlalchemy.Text),
+ sqlalchemy.Column('data', types.LongText),
sqlalchemy.Column('watch_rule_id', sqlalchemy.Integer,
sqlalchemy.ForeignKey('watch_rule.id'),
nullable=False),
@@ -145,6 +168,7 @@ def upgrade(migrate_engine):
user_creds,
stack,
resource,
+ resource_data,
event,
watch_rule,
watch_data,
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py b/heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py
deleted file mode 100644
index 20fb6dce1..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-from sqlalchemy.dialects import mysql
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name != 'mysql':
- return
-
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- stack = sqlalchemy.Table('stack', meta, autoload=True)
- stack.c.parameters.alter(type=mysql.LONGTEXT())
-
- resource = sqlalchemy.Table('resource', meta, autoload=True)
- resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
-
- watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
- watch_rule.c.rule.alter(type=mysql.LONGTEXT())
-
- watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
- watch_data.c.data.alter(type=mysql.LONGTEXT())
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/057_resource_uuid_to_id.py b/heat/db/sqlalchemy/migrate_repo/versions/057_resource_uuid_to_id.py
index 0cef59da8..f064f9901 100644
--- a/heat/db/sqlalchemy/migrate_repo/versions/057_resource_uuid_to_id.py
+++ b/heat/db/sqlalchemy/migrate_repo/versions/057_resource_uuid_to_id.py
@@ -145,11 +145,6 @@ def upgrade_resource(migrate_engine):
constraint_kwargs['name'] = 'uniq_resource0uuid0'
cons = constraint.UniqueConstraint('uuid', **constraint_kwargs)
cons.create()
- if migrate_engine.name == 'postgresql':
- # resource_id_seq will be dropped in the case of removing `id` column
- # set owner to none for saving this sequence (it is needed in the
- # earlier migration)
- migrate_engine.execute('alter sequence resource_id_seq owned by none')
res_table.c.id.drop()
diff --git a/heat/db/sqlalchemy/migration.py b/heat/db/sqlalchemy/migration.py
index 74680886e..0179f3d0b 100644
--- a/heat/db/sqlalchemy/migration.py
+++ b/heat/db/sqlalchemy/migration.py
@@ -16,7 +16,7 @@ import os
from oslo_db.sqlalchemy import migration as oslo_migration
-INIT_VERSION = 14
+INIT_VERSION = 27
def db_sync(engine, version=None):
diff --git a/heat/engine/attributes.py b/heat/engine/attributes.py
index 41e2c5e87..477b91247 100644
--- a/heat/engine/attributes.py
+++ b/heat/engine/attributes.py
@@ -13,6 +13,7 @@
import collections
+from oslo_utils import strutils
import six
from heat.common.i18n import _
@@ -48,9 +49,9 @@ class Schema(constr.Schema):
)
TYPES = (
- STRING, MAP, LIST, INTEGER
+ STRING, MAP, LIST, INTEGER, BOOLEAN
) = (
- 'String', 'Map', 'List', 'Integer'
+ 'String', 'Map', 'List', 'Integer', 'Boolean'
)
def __init__(self, description=None,
@@ -191,6 +192,13 @@ class Attributes(collections.Mapping):
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.INTEGER})
+ elif attrib.schema.type == attrib.schema.BOOLEAN:
+ try:
+ strutils.bool_from_string(value, strict=True)
+ except ValueError:
+ LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
+ {'name': attrib.name,
+ 'att_type': attrib.schema.BOOLEAN})
def __getitem__(self, key):
if key not in self:
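
For the new Boolean attribute type, the check above leans on oslo.utils' strict boolean parsing. A short illustration (assuming oslo_utils is installed) of which values pass and which trigger the warning path:

from oslo_utils import strutils

# recognised truthy/falsey strings parse cleanly; anything else raises
# ValueError, which the Attributes code logs as a type mismatch
for value in ('true', 'no', '0', 'not-a-bool'):
    try:
        print(value, '->', strutils.bool_from_string(value, strict=True))
    except ValueError:
        print(value, '-> not a valid Boolean attribute value')
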
diff --git a/heat/engine/clients/__init__.py b/heat/engine/clients/__init__.py
index 9dc9c7b7d..e3aa76a12 100644
--- a/heat/engine/clients/__init__.py
+++ b/heat/engine/clients/__init__.py
@@ -91,7 +91,7 @@ class ClientBackend(object):
context)
except (ImportError, RuntimeError) as err:
msg = _('Invalid cloud_backend setting in heat.conf '
- 'detected - %s'), six.text_type(err)
+ 'detected - %s') % six.text_type(err)
LOG.error(msg)
raise exception.Invalid(reason=msg)
diff --git a/heat/engine/clients/client_plugin.py b/heat/engine/clients/client_plugin.py
index 9dc8b8b28..a8141d206 100644
--- a/heat/engine/clients/client_plugin.py
+++ b/heat/engine/clients/client_plugin.py
@@ -205,3 +205,15 @@ class ClientPlugin(object):
return args
# FIXME(kanagaraj-manickam) Update other client plugins to leverage
# this method (bug 1461041)
+
+ def does_endpoint_exist(self,
+ service_type,
+ service_name):
+ endpoint_type = self._get_client_option(service_name,
+ 'endpoint_type')
+ try:
+ self.url_for(service_type=service_type,
+ endpoint_type=endpoint_type)
+ return True
+ except exceptions.EndpointNotFound:
+ return False
diff --git a/heat/engine/clients/os/designate.py b/heat/engine/clients/os/designate.py
index 053cfd6f8..bb81604f2 100644
--- a/heat/engine/clients/os/designate.py
+++ b/heat/engine/clients/os/designate.py
@@ -11,21 +11,50 @@
# License for the specific language governing permissions and limitations
# under the License.
-from designateclient import client
from designateclient import exceptions
+from designateclient import v1 as client
+from heat.common import exception as heat_exception
from heat.engine.clients import client_plugin
+from heat.engine import constraints
class DesignateClientPlugin(client_plugin.ClientPlugin):
exceptions_module = [exceptions]
+ service_types = ['dns']
+
def _create(self):
args = self._get_client_args(service_name='designate',
- service_type='dns')
+ service_type=self.service_types[0])
- return client.client('1', **args)
+ return client.Client(auth_url=args['auth_url'],
+ project_id=args['project_id'],
+ token=args['token'](),
+ endpoint=args['os_endpoint'],
+ cacert=args['cacert'],
+ insecure=args['insecure'])
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
+
+ def get_domain_id(self, domain_id_or_name):
+ try:
+ domain_obj = self.client().domains.get(domain_id_or_name)
+ return domain_obj.id
+ except exceptions.NotFound:
+ for domain in self.client().domains.list():
+ if domain.name == domain_id_or_name:
+ return domain.id
+
+ raise heat_exception.EntityNotFound(entity='Designate Domain',
+ name=domain_id_or_name)
+
+
+class DesignateDomainConstraint(constraints.BaseCustomConstraint):
+
+ expected_exceptions = (heat_exception.EntityNotFound,)
+
+ def validate_with_client(self, client, domain):
+ client.client_plugin('designate').get_domain_id(domain)
diff --git a/heat/engine/clients/os/nova.py b/heat/engine/clients/os/nova.py
index 7fd12b960..3f8218e52 100644
--- a/heat/engine/clients/os/nova.py
+++ b/heat/engine/clients/os/nova.py
@@ -432,7 +432,10 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
If that's the case, confirm the resize, if not raise an error.
"""
self.refresh_server(server)
- while server.status == 'RESIZE':
+        # the resize operation is asynchronous, so the resize may not have
+        # started yet when the server status is checked (the server may
+        # still report ACTIVE instead of RESIZE).
+ while server.status in ('RESIZE', 'ACTIVE'):
yield
self.refresh_server(server)
if server.status == 'VERIFY_RESIZE':
diff --git a/heat/engine/environment.py b/heat/engine/environment.py
index 345b8f67f..5527078f4 100644
--- a/heat/engine/environment.py
+++ b/heat/engine/environment.py
@@ -192,9 +192,11 @@ class ResourceRegistry(object):
def load(self, json_snippet):
self._load_registry([], json_snippet)
- def register_class(self, resource_type, resource_class):
- ri = ResourceInfo(self, [resource_type], resource_class)
- self._register_info([resource_type], ri)
+ def register_class(self, resource_type, resource_class, path=None):
+ if path is None:
+ path = [resource_type]
+ ri = ResourceInfo(self, path, resource_class)
+ self._register_info(path, ri)
def _load_registry(self, path, registry):
for k, v in iter(registry.items()):
@@ -436,7 +438,7 @@ class ResourceRegistry(object):
return _as_dict(self._registry)
- def get_types(self, support_status):
+ def get_types(self, cnxt=None, support_status=None):
'''Return a list of valid resource types.'''
def is_resource(key):
@@ -448,8 +450,16 @@ class ResourceRegistry(object):
cls.get_class().support_status.status ==
support_status.encode())
+ def is_available(cls):
+ if cnxt is None:
+ return True
+
+ return cls.get_class().is_service_available(cnxt)
+
return [name for name, cls in six.iteritems(self._registry)
- if is_resource(name) and status_matches(cls)]
+ if (is_resource(name) and
+ status_matches(cls) and
+ is_available(cls))]
class Environment(object):
@@ -515,8 +525,8 @@ class Environment(object):
env_fmt.PARAMETER_DEFAULTS: self.param_defaults,
env_fmt.ENCRYPTED_PARAM_NAMES: self.encrypted_param_names}
- def register_class(self, resource_type, resource_class):
- self.registry.register_class(resource_type, resource_class)
+ def register_class(self, resource_type, resource_class, path=None):
+ self.registry.register_class(resource_type, resource_class, path=path)
def register_constraint(self, constraint_name, constraint):
self.constraints[constraint_name] = constraint
@@ -529,8 +539,8 @@ class Environment(object):
def get_class(self, resource_type, resource_name=None):
return self.registry.get_class(resource_type, resource_name)
- def get_types(self, support_status=None):
- return self.registry.get_types(support_status)
+ def get_types(self, cnxt=None, support_status=None):
+ return self.registry.get_types(cnxt, support_status)
def get_resource_info(self, resource_type, resource_name=None,
registry_type=None):
diff --git a/heat/engine/hot/functions.py b/heat/engine/hot/functions.py
index 29a1529fe..0d5f8f3c3 100644
--- a/heat/engine/hot/functions.py
+++ b/heat/engine/hot/functions.py
@@ -424,3 +424,66 @@ class Digest(function.Function):
self.validate_usage(args)
return self.digest(*args)
+
+
+class StrSplit(function.Function):
+ '''
+ A function for splitting delimited strings into a list
+ and optionally extracting a specific list member by index.
+
+ Takes the form::
+
+ str_split: [delimiter, string, <index> ]
+
+ or::
+
+ str_split:
+ - delimiter
+ - string
+ - <index>
+    If <index> is specified, the specified list item will be returned;
+    otherwise, the whole list is returned, similar to get_attr with
+    path-based attributes accessing lists.
+ '''
+
+ def __init__(self, stack, fn_name, args):
+ super(StrSplit, self).__init__(stack, fn_name, args)
+ example = '"%s" : [ ",", "apples,pears", <index>]' % fn_name
+ self.fmt_data = {'fn_name': fn_name,
+ 'example': example}
+ self.fn_name = fn_name
+
+ if isinstance(args, (six.string_types, collections.Mapping)):
+ raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
+ 'should be: %(example)s') % self.fmt_data)
+
+ def result(self):
+ args = function.resolve(self.args)
+
+ try:
+ delim = args.pop(0)
+ str_to_split = args.pop(0)
+ except (AttributeError, IndexError):
+ raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
+ 'should be: %(example)s') % self.fmt_data)
+ split_list = str_to_split.split(delim)
+
+ # Optionally allow an index to be specified
+ if args:
+ try:
+ index = int(args.pop(0))
+ except ValueError:
+ raise ValueError(_('Incorrect index to "%(fn_name)s" '
+ 'should be: %(example)s') % self.fmt_data)
+ else:
+ try:
+ res = split_list[index]
+ except IndexError:
+ raise ValueError(_('Incorrect index to "%(fn_name)s" '
+ 'should be between 0 and '
+ '%(max_index)s')
+ % {'fn_name': self.fn_name,
+ 'max_index': len(split_list) - 1})
+ else:
+ res = split_list
+ return res
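
A standalone sketch (no Heat imports) of what result() above resolves to for the two argument forms described in the docstring:

def str_split(args):
    # mirrors the result() logic: split on the delimiter, optionally index
    delim, str_to_split = args[0], args[1]
    split_list = str_to_split.split(delim)
    if len(args) > 2:
        return split_list[int(args[2])]
    return split_list


print(str_split([',', 'apples,pears']))     # ['apples', 'pears']
print(str_split([',', 'apples,pears', 1]))  # 'pears'
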
diff --git a/heat/engine/hot/template.py b/heat/engine/hot/template.py
index e329e836b..4666e82b3 100644
--- a/heat/engine/hot/template.py
+++ b/heat/engine/hot/template.py
@@ -339,6 +339,9 @@ class HOTemplate20151015(HOTemplate20150430):
'resource_facade': hot_funcs.ResourceFacade,
'str_replace': hot_funcs.Replace,
+ # functions added since 20150430
+ 'str_split': hot_funcs.StrSplit,
+
# functions removed from 20150430
'Fn::Select': hot_funcs.Removed,
diff --git a/heat/engine/parameters.py b/heat/engine/parameters.py
index 4f91bea4d..83d688228 100644
--- a/heat/engine/parameters.py
+++ b/heat/engine/parameters.py
@@ -80,6 +80,9 @@ class Schema(constr.Schema):
raise exception.InvalidSchemaError(
message=_('Default must be a comma-delimited list '
'string: %s') % err)
+ elif self.type == self.LIST and isinstance(self.default, list):
+ default_value = [encodeutils.safe_encode(six.text_type(x))
+ for x in self.default]
try:
self.validate_constraints(default_value, context,
[constr.CustomConstraint])
@@ -352,7 +355,8 @@ class CommaDelimitedListParam(Parameter, collections.Sequence):
def parse(self, value):
# only parse when value is not already a list
if isinstance(value, list):
- return value
+ return [encodeutils.safe_encode(six.text_type(x))
+ for x in value]
try:
if value is not None:
if value == '':
diff --git a/heat/engine/properties.py b/heat/engine/properties.py
index df595660b..e7e936c48 100644
--- a/heat/engine/properties.py
+++ b/heat/engine/properties.py
@@ -165,7 +165,8 @@ class Schema(constr.Schema):
constraints=param.constraints,
update_allowed=True,
immutable=False,
- allow_conversion=allow_conversion)
+ allow_conversion=allow_conversion,
+ default=param.default)
def allowed_param_prop_type(self):
"""
diff --git a/heat/engine/resource.py b/heat/engine/resource.py
index 92dda53ac..bc5859120 100644
--- a/heat/engine/resource.py
+++ b/heat/engine/resource.py
@@ -31,6 +31,7 @@ from heat.common import short_id
from heat.common import timeutils
from heat.engine import attributes
from heat.engine.cfn import template as cfn_tmpl
+from heat.engine import clients
from heat.engine import environment
from heat.engine import event
from heat.engine import function
@@ -116,6 +117,9 @@ class Resource(object):
# that describes the appropriate resource attributes
attributes_schema = {}
+ # Resource implementations set this to update policies
+ update_policy_schema = {}
+
# If True, this resource may perform authenticated API requests
# throughout its lifecycle
requires_deferred_auth = False
@@ -149,8 +153,18 @@ class Resource(object):
resource_name=name)
except exception.TemplateNotFound:
ResourceClass = template_resource.TemplateResource
+
assert issubclass(ResourceClass, Resource)
+ if not ResourceClass.is_service_available(stack.context):
+ ex = exception.StackResourceUnavailable(
+ service_name=ResourceClass.default_client_name,
+ resource_name=name
+ )
+ LOG.error(six.text_type(ex))
+
+ raise ex
+
return super(Resource, cls).__new__(ResourceClass)
def __init__(self, name, definition, stack):
@@ -499,6 +513,34 @@ class Resource(object):
assert client_name, "Must specify client name"
return self.stack.clients.client_plugin(client_name)
+ @classmethod
+ def is_service_available(cls, context):
+        # NOTE(kanagaraj-manickam): return True for resources that have no
+        # endpoint, such as RandomString and the other OS::Heat resources,
+        # as they are implemented within the engine.
+ if cls.default_client_name is None:
+ return True
+
+ try:
+ client_plugin = clients.Clients(context).client_plugin(
+ cls.default_client_name)
+
+ service_types = client_plugin.service_types
+ if not service_types:
+ return True
+
+            # NOTE(kanagaraj-manickam): if one of the service_types exists
+            # in the keystone catalog, consider the service available.
+ for service_type in service_types:
+ if client_plugin.does_endpoint_exist(
+ service_type=service_type,
+ service_name=cls.default_client_name):
+ return True
+ except Exception as ex:
+ LOG.exception(ex)
+
+ return False
+
def keystone(self):
return self.client('keystone')
@@ -625,22 +667,18 @@ class Resource(object):
'''
return self
- def create_convergence(self, template_id, resource_data, engine_id):
+ def create_convergence(self, resource_data, engine_id):
'''
- Creates the resource by invoking the scheduler TaskRunner
- and it persists the resource's current_template_id to template_id and
- resource's requires to list of the required resource id from the
- given resource_data.
+ Creates the resource by invoking the scheduler TaskRunner.
'''
with self.lock(engine_id):
+ self.requires = list(
+ set(data[u'id'] for data in resource_data.values()
+ if data is not None)
+ )
runner = scheduler.TaskRunner(self.create)
runner()
- # update the resource db record (stored in unlock())
- self.current_template_id = template_id
- self.requires = list(
- {graph_key[0] for graph_key, data in resource_data.items()})
-
@scheduler.wrappertask
def create(self):
'''
@@ -797,9 +835,10 @@ class Resource(object):
# update the resource db record (stored in unlock)
self.current_template_id = template_id
- current_requires = set(
- graph_key[0] for graph_key, data in resource_data.items())
- self.requires = list(set(self.requires) | current_requires)
+ self.requires = list(
+ set(data[u'id'] for data in resource_data.values()
+ if data is not None)
+ )
@scheduler.wrappertask
def update(self, after, before=None, prev_resource=None):
@@ -891,7 +930,10 @@ class Resource(object):
action = self.SUSPEND
# Don't try to suspend the resource unless it's in a stable state
- if (self.action == self.DELETE or self.status != self.COMPLETE):
+ # or if the previous suspend failed
+ if (self.action == self.DELETE or
+ (self.action != self.SUSPEND and
+ self.status != self.COMPLETE)):
exc = exception.Error(_('State %s invalid for suspend')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
@@ -906,12 +948,15 @@ class Resource(object):
'''
action = self.RESUME
- # Can't resume a resource unless it's SUSPEND_COMPLETE
- if self.state != (self.SUSPEND, self.COMPLETE):
+        # Allow resuming a resource if it's SUSPEND_COMPLETE,
+        # RESUME_FAILED or RESUME_COMPLETE. It is recommended to check
+        # the real state of the physical resource in handle_resume()
+ if self.state not in ((self.SUSPEND, self.COMPLETE),
+ (self.RESUME, self.FAILED),
+ (self.RESUME, self.COMPLETE)):
exc = exception.Error(_('State %s invalid for resume')
% six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action)
-
LOG.info(_LI('resuming %s'), six.text_type(self))
return self._do_action(action)
@@ -991,23 +1036,15 @@ class Resource(object):
msg = _('"%s" deletion policy not supported') % policy
raise exception.StackValidationFailed(message=msg)
- def delete_convergence(self, template_id, resource_data, engine_id):
+ def delete_convergence(self, engine_id):
'''
- Deletes the resource by invoking the scheduler TaskRunner
- and it persists the resource's current_template_id to template_id and
- resource's requires to list of the required resource id from the
- given resource_data and existing resource's requires.
+ Destroys the resource. The destroy task is run in a scheduler
+ TaskRunner after acquiring the lock on resource.
'''
with self.lock(engine_id):
- runner = scheduler.TaskRunner(self.delete)
+ runner = scheduler.TaskRunner(self.destroy)
runner()
- # update the resource db record
- self.current_template_id = template_id
- current_requires = {graph_key[0]
- for graph_key, data in resource_data.items()}
- self.requires = (list(set(self.requires) - current_requires))
-
@scheduler.wrappertask
def delete(self):
'''
@@ -1179,14 +1216,16 @@ class Resource(object):
def unlock(self, rsrc, engine_id, atomic_key):
if atomic_key is None:
atomic_key = 0
- res = rsrc.select_and_update(
+
+ updated_ok = rsrc.select_and_update(
{'engine_id': None,
'current_template_id': self.current_template_id,
'updated_at': self.updated_time,
'requires': self.requires},
expected_engine_id=engine_id,
atomic_key=atomic_key + 1)
- if res != 1:
+
+ if not updated_ok:
LOG.warn(_LW('Failed to unlock resource %s'), rsrc.name)
def _resolve_attribute(self, name):
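
The availability gate added in __new__ and get_types above boils down to: a resource type is offered only if at least one of its client plugin's service_types has an endpoint in the service catalog. A hypothetical, self-contained sketch of that check (FakePlugin and the catalog set are illustrative, not Heat code):

class FakePlugin(object):
    service_types = ['dns']

    def does_endpoint_exist(self, service_type, service_name):
        catalog = {'compute', 'orchestration'}  # pretend service catalog
        return service_type in catalog


def is_service_available(plugin, service_name):
    if not plugin.service_types:
        return True
    return any(plugin.does_endpoint_exist(service_type=st,
                                          service_name=service_name)
               for st in plugin.service_types)


print(is_service_available(FakePlugin(), 'designate'))  # False -> type hidden
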
diff --git a/heat/engine/resources/aws/autoscaling/autoscaling_group.py b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
index c7755bda0..02d07629f 100644
--- a/heat/engine/resources/aws/autoscaling/autoscaling_group.py
+++ b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
@@ -354,8 +354,9 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
'group': notif['groupname']},
})
notification.send(**notif)
-
- self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
+ finally:
+ self._cooldown_timestamp("%s : %s" % (adjustment_type,
+ adjustment))
def _tags(self):
"""Add Identifing Tags to all servers in the group.
diff --git a/heat/engine/resources/aws/autoscaling/scaling_policy.py b/heat/engine/resources/aws/autoscaling/scaling_policy.py
index d5a634085..0af313cb1 100644
--- a/heat/engine/resources/aws/autoscaling/scaling_policy.py
+++ b/heat/engine/resources/aws/autoscaling/scaling_policy.py
@@ -97,7 +97,7 @@ class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
def FnGetRefId(self):
if self.resource_id is not None:
- return six.text_type(self._get_signed_url())
+ return six.text_type(self._get_ec2_signed_url())
else:
return six.text_type(self.name)
diff --git a/heat/engine/resources/aws/cfn/wait_condition_handle.py b/heat/engine/resources/aws/cfn/wait_condition_handle.py
index 0811a068c..9982c7431 100644
--- a/heat/engine/resources/aws/cfn/wait_condition_handle.py
+++ b/heat/engine/resources/aws/cfn/wait_condition_handle.py
@@ -45,7 +45,7 @@ class WaitConditionHandle(wc_base.BaseWaitConditionHandle):
'''
if self.resource_id:
wc = signal_responder.WAITCONDITION
- return six.text_type(self._get_signed_url(signal_type=wc))
+ return six.text_type(self._get_ec2_signed_url(signal_type=wc))
else:
return six.text_type(self.name)
diff --git a/heat/engine/resources/aws/ec2/instance.py b/heat/engine/resources/aws/ec2/instance.py
index 192e1416a..c0a2719b1 100644
--- a/heat/engine/resources/aws/ec2/instance.py
+++ b/heat/engine/resources/aws/ec2/instance.py
@@ -795,8 +795,11 @@ class Instance(resource.Resource):
else:
raise
else:
- LOG.debug("suspending instance %s" % self.resource_id)
- server.suspend()
+ # if the instance has already been suspended successfully,
+ # there is no need to suspend again
+ if self.client_plugin().get_status(server) != 'SUSPENDED':
+ LOG.debug("suspending instance %s" % self.resource_id)
+ server.suspend()
return server.id
def check_suspend_complete(self, server_id):
@@ -834,8 +837,11 @@ class Instance(resource.Resource):
else:
raise
else:
- LOG.debug("resuming instance %s" % self.resource_id)
- server.resume()
+ # if the instance has already been resumed successfully,
+ # there is no need to resume again
+ if self.client_plugin().get_status(server) != 'ACTIVE':
+ LOG.debug("resuming instance %s" % self.resource_id)
+ server.resume()
return server.id
def check_resume_complete(self, server_id):
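The suspend/resume hunks above make the operations idempotent by checking the current server status before issuing the call. A rough sketch of the pattern against a stand-in server object (the real client plugin's get_status() additionally normalises transient task states):

class FakeServer(object):
    """Stand-in for a nova server; only what the sketch needs."""
    def __init__(self, status):
        self.status = status
        self.suspend_calls = 0

    def suspend(self):
        self.suspend_calls += 1


def suspend_if_needed(server):
    # Only ask the compute service to suspend when the instance is not
    # already SUSPENDED, mirroring the guard added above.
    if server.status != 'SUSPENDED':
        server.suspend()


already_suspended = FakeServer('SUSPENDED')
suspend_if_needed(already_suspended)
assert already_suspended.suspend_calls == 0   # no duplicate API call

active = FakeServer('ACTIVE')
suspend_if_needed(active)
assert active.suspend_calls == 1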
diff --git a/heat/engine/resources/openstack/barbican/order.py b/heat/engine/resources/openstack/barbican/order.py
index ce95fe308..774982516 100644
--- a/heat/engine/resources/openstack/barbican/order.py
+++ b/heat/engine/resources/openstack/barbican/order.py
@@ -125,24 +125,43 @@ class Order(resource.Resource):
}
attributes_schema = {
- STATUS: attributes.Schema(_('The status of the order.')),
- ORDER_REF: attributes.Schema(_('The URI to the order.')),
- SECRET_REF: attributes.Schema(_('The URI to the created secret.')),
+ STATUS: attributes.Schema(
+ _('The status of the order.'),
+ type=attributes.Schema.STRING
+ ),
+ ORDER_REF: attributes.Schema(
+ _('The URI to the order.'),
+ type=attributes.Schema.STRING
+ ),
+ SECRET_REF: attributes.Schema(
+ _('The URI to the created secret.'),
+ type=attributes.Schema.STRING
+ ),
CONTAINER_REF: attributes.Schema(
_('The URI to the created container.'),
- support_status=support.SupportStatus(version='5.0.0')),
+ support_status=support.SupportStatus(version='5.0.0'),
+ type=attributes.Schema.STRING
+ ),
PUBLIC_KEY: attributes.Schema(
_('The payload of the created public key, if available.'),
- support_status=support.SupportStatus(version='5.0.0')),
+ support_status=support.SupportStatus(version='5.0.0'),
+ type=attributes.Schema.STRING
+ ),
PRIVATE_KEY: attributes.Schema(
_('The payload of the created private key, if available.'),
- support_status=support.SupportStatus(version='5.0.0')),
+ support_status=support.SupportStatus(version='5.0.0'),
+ type=attributes.Schema.STRING
+ ),
CERTIFICATE: attributes.Schema(
_('The payload of the created certificate, if available.'),
- support_status=support.SupportStatus(version='5.0.0')),
+ support_status=support.SupportStatus(version='5.0.0'),
+ type=attributes.Schema.STRING
+ ),
INTERMEDIATES: attributes.Schema(
_('The payload of the created intermediates, if available.'),
- support_status=support.SupportStatus(version='5.0.0')),
+ support_status=support.SupportStatus(version='5.0.0'),
+ type=attributes.Schema.STRING
+ ),
}
def barbican(self):
diff --git a/heat/engine/resources/openstack/barbican/secret.py b/heat/engine/resources/openstack/barbican/secret.py
index bf9045798..d5ac95e23 100644
--- a/heat/engine/resources/openstack/barbican/secret.py
+++ b/heat/engine/resources/openstack/barbican/secret.py
@@ -98,10 +98,12 @@ class Secret(resource.Resource):
attributes_schema = {
STATUS: attributes.Schema(
- _('The status of the secret.')
+ _('The status of the secret.'),
+ type=attributes.Schema.STRING
),
DECRYPTED_PAYLOAD: attributes.Schema(
- _('The decrypted secret payload.')
+ _('The decrypted secret payload.'),
+ type=attributes.Schema.STRING
),
}
diff --git a/heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py b/heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py
new file mode 100644
index 000000000..4596d18ba
--- /dev/null
+++ b/heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py
@@ -0,0 +1,121 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+
+
+class CinderEncryptedVolumeType(resource.Resource):
+ """
+ A resource for encrypting a cinder volume type.
+
+ Note that the default cinder security policy limits use of this
+ resource to administrators only.
+ """
+
+ support_status = support.SupportStatus(version='5.0.0')
+
+ default_client_name = 'cinder'
+
+ PROPERTIES = (
+ PROVIDER, CONTROL_LOCATION, CIPHER, KEY_SIZE, VOLUME_TYPE
+ ) = (
+ 'provider', 'control_location', 'cipher', 'key_size', 'volume_type'
+ )
+
+ properties_schema = {
+ PROVIDER: properties.Schema(
+ properties.Schema.STRING,
+ _('The class that provides encryption support. '
+ 'For example, nova.volume.encryptors.luks.LuksEncryptor.'),
+ required=True,
+ update_allowed=True
+ ),
+ CONTROL_LOCATION: properties.Schema(
+ properties.Schema.STRING,
+ _('Notional service where encryption is performed. '
+ 'For example, front-end (for Nova).'),
+ constraints=[
+ constraints.AllowedValues(['front-end', 'back-end'])
+ ],
+ default='front-end',
+ update_allowed=True
+ ),
+ CIPHER: properties.Schema(
+ properties.Schema.STRING,
+ _('The encryption algorithm or mode. '
+ 'For example, aes-xts-plain64.'),
+ constraints=[
+ constraints.AllowedValues(
+ ['aes-xts-plain64', 'aes-cbc-essiv']
+ )
+ ],
+ default=None,
+ update_allowed=True
+ ),
+ KEY_SIZE: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Size of encryption key, in bits. '
+ 'For example, 128 or 256.'),
+ default=None,
+ update_allowed=True
+ ),
+ VOLUME_TYPE: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or id of volume type (OS::Cinder::VolumeType).'),
+ required=True,
+ constraints=[constraints.CustomConstraint('cinder.vtype')]
+ ),
+ }
+
+ def _get_vol_type_id(self, volume_type):
+ id = self.client_plugin().get_volume_type(volume_type)
+ return id
+
+ def handle_create(self):
+ body = {
+ 'provider': self.properties[self.PROVIDER],
+ 'cipher': self.properties[self.CIPHER],
+ 'key_size': self.properties[self.KEY_SIZE],
+ 'control_location': self.properties[self.CONTROL_LOCATION]
+ }
+
+ vol_type_id = self._get_vol_type_id(self.properties[self.VOLUME_TYPE])
+
+ encrypted_vol_type = self.cinder().volume_encryption_types.create(
+ volume_type=vol_type_id, specs=body
+ )
+ self.resource_id_set(encrypted_vol_type.volume_type_id)
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ self.cinder().volume_encryption_types.update(
+ volume_type=self.resource_id, specs=prop_diff
+ )
+
+ def handle_delete(self):
+ if self.resource_id is None:
+ return
+ try:
+ self.cinder().volume_encryption_types.delete(self.resource_id)
+ except Exception as e:
+ self.client_plugin().ignore_not_found(e)
+
+
+def resource_mapping():
+ return {
+ 'OS::Cinder::EncryptedVolumeType': CinderEncryptedVolumeType
+ }
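As an illustration only (not part of the change), a template using the new OS::Cinder::EncryptedVolumeType might look like the following, written here as the equivalent Python dict rather than YAML; the resource names and property values are hypothetical, and admin credentials are assumed per the docstring above:

# Hypothetical HOT fragment for the new resource type, expressed as the
# equivalent Python dict (it would normally be written in YAML).
template = {
    'heat_template_version': '2014-10-16',
    'resources': {
        'my_vol_type': {
            'type': 'OS::Cinder::VolumeType',
            'properties': {'name': 'my-encrypted-type'},
        },
        'my_encryption': {
            'type': 'OS::Cinder::EncryptedVolumeType',
            'properties': {
                'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
                'control_location': 'front-end',
                'cipher': 'aes-xts-plain64',
                'key_size': 256,
                'volume_type': {'get_resource': 'my_vol_type'},
            },
        },
    },
}

On create the resource resolves volume_type to an id via the cinder.vtype constraint and passes the remaining properties to volume_encryption_types.create(), as shown in handle_create() above.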
diff --git a/heat/engine/resources/openstack/heat/ha_restarter.py b/heat/engine/resources/openstack/heat/ha_restarter.py
index 74db80af7..4dbfa20cf 100644
--- a/heat/engine/resources/openstack/heat/ha_restarter.py
+++ b/heat/engine/resources/openstack/heat/ha_restarter.py
@@ -103,7 +103,7 @@ class Restarter(signal_responder.SignalResponder):
when there is an alarm.
'''
if name == self.ALARM_URL and self.resource_id is not None:
- return six.text_type(self._get_signed_url())
+ return six.text_type(self._get_ec2_signed_url())
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/scaling_policy.py b/heat/engine/resources/openstack/heat/scaling_policy.py
index 957f5ed5d..41e76ac0a 100644
--- a/heat/engine/resources/openstack/heat/scaling_policy.py
+++ b/heat/engine/resources/openstack/heat/scaling_policy.py
@@ -170,29 +170,33 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
- if group is None:
- raise exception.NotFound(_('Alarm %(alarm)s could not find '
- 'scaling group named "%(group)s"') % {
- 'alarm': self.name,
- 'group': asgn_id})
-
- LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
- '%(asgn_id)s by %(filter)s'),
- {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
- 'filter': self.properties[self.SCALING_ADJUSTMENT]})
- adjustment_type = self._get_adjustement_type()
- group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type,
- self.properties[self.MIN_ADJUSTMENT_STEP])
-
- self._cooldown_timestamp("%s : %s" %
- (self.properties[self.ADJUSTMENT_TYPE],
- self.properties[self.SCALING_ADJUSTMENT]))
+ try:
+ if group is None:
+ raise exception.NotFound(_('Alarm %(alarm)s could not find '
+ 'scaling group named "%(group)s"'
+ ) % {'alarm': self.name,
+ 'group': asgn_id})
+
+ LOG.info(_LI('%(name)s Alarm, adjusting Group %(group)s with id '
+ '%(asgn_id)s by %(filter)s'),
+ {'name': self.name, 'group': group.name,
+ 'asgn_id': asgn_id,
+ 'filter': self.properties[self.SCALING_ADJUSTMENT]})
+ adjustment_type = self._get_adjustement_type()
+ group.adjust(self.properties[self.SCALING_ADJUSTMENT],
+ adjustment_type,
+ self.properties[self.MIN_ADJUSTMENT_STEP])
+
+ finally:
+ self._cooldown_timestamp("%s : %s" % (
+ self.properties[self.ADJUSTMENT_TYPE],
+ self.properties[self.SCALING_ADJUSTMENT]))
def _resolve_attribute(self, name):
if name == self.ALARM_URL:
- return six.text_type(self._get_signed_url())
+ return six.text_type(self._get_ec2_signed_url())
elif name == self.SIGNAL_URL:
- return six.text_type(self._get_signal_url())
+ return six.text_type(self._get_heat_signal_url())
def FnGetRefId(self):
return resource.Resource.FnGetRefId(self)
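The try/finally restructuring above guarantees the cooldown timestamp is written even when the adjustment itself fails, so a broken scaling action cannot retrigger immediately. A minimal sketch of that control flow (plain Python, not Heat code):

def handle_alarm(adjust, record_cooldown):
    try:
        adjust()                  # may raise if the scaling action fails
    finally:
        record_cooldown()         # cooldown window starts either way


def failing_adjust():
    raise RuntimeError('scale out failed')


events = []
try:
    handle_alarm(failing_adjust, lambda: events.append('cooldown'))
except RuntimeError:
    pass
assert events == ['cooldown']     # timestamp recorded despite the failure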
diff --git a/heat/engine/resources/openstack/heat/software_deployment.py b/heat/engine/resources/openstack/heat/software_deployment.py
index 79d725899..0b87aea57 100644
--- a/heat/engine/resources/openstack/heat/software_deployment.py
+++ b/heat/engine/resources/openstack/heat/software_deployment.py
@@ -425,7 +425,7 @@ class SoftwareDeployment(signal_responder.SignalResponder):
scl.DESCRIPTION: _('ID of signal to use for signaling '
'output values'),
scl.TYPE: 'String',
- 'value': self._get_signed_url()
+ 'value': self._get_ec2_signed_url()
})
inputs.append({
scl.NAME: self.DEPLOY_SIGNAL_VERB,
@@ -537,7 +537,7 @@ class SoftwareDeployment(signal_responder.SignalResponder):
def _delete_resource(self):
if self._signal_transport_cfn():
- self._delete_signed_url()
+ self._delete_ec2_signed_url()
self._delete_user()
elif self._signal_transport_heat():
self._delete_user()
diff --git a/heat/engine/resources/openstack/manila/share_network.py b/heat/engine/resources/openstack/manila/share_network.py
index 2d165c5b8..aabcc8a14 100644
--- a/heat/engine/resources/openstack/manila/share_network.py
+++ b/heat/engine/resources/openstack/manila/share_network.py
@@ -63,7 +63,8 @@ class ManilaShareNetwork(resource.Resource):
NOVA_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Nova network id.'),
- update_allowed=True
+ update_allowed=True,
+ constraints=[constraints.CustomConstraint('nova.network')]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
diff --git a/heat/engine/resources/openstack/mistral/workflow.py b/heat/engine/resources/openstack/mistral/workflow.py
index 47befc64d..0b58d521a 100644
--- a/heat/engine/resources/openstack/mistral/workflow.py
+++ b/heat/engine/resources/openstack/mistral/workflow.py
@@ -408,7 +408,7 @@ class Workflow(signal_responder.SignalResponder,
self.INPUT: self.properties.get(self.INPUT)}
elif name == self.ALARM_URL:
- return six.text_type(self._get_signed_url())
+ return six.text_type(self._get_ec2_signed_url())
def resource_mapping():
diff --git a/heat/engine/resources/openstack/nova/server.py b/heat/engine/resources/openstack/nova/server.py
index 8f00c7c1a..6b00e0342 100644
--- a/heat/engine/resources/openstack/nova/server.py
+++ b/heat/engine/resources/openstack/nova/server.py
@@ -1405,8 +1405,11 @@ class Server(stack_user.StackUser):
else:
raise
else:
- LOG.debug('suspending server %s' % self.resource_id)
- server.suspend()
+ # if the server has already been suspended successfully,
+ # there is no need to suspend again
+ if self.client_plugin().get_status(server) != 'SUSPENDED':
+ LOG.debug('suspending server %s' % self.resource_id)
+ server.suspend()
return server.id
def check_suspend_complete(self, server_id):
@@ -1444,8 +1447,11 @@ class Server(stack_user.StackUser):
else:
raise
else:
- LOG.debug('resuming server %s' % self.resource_id)
- server.resume()
+ # if the server has already been resumed successfully,
+ # there is no need to resume again
+ if self.client_plugin().get_status(server) != 'ACTIVE':
+ LOG.debug('resuming server %s' % self.resource_id)
+ server.resume()
return server.id
def check_resume_complete(self, server_id):
diff --git a/heat/engine/resources/signal_responder.py b/heat/engine/resources/signal_responder.py
index 87472fb3d..5693bf596 100644
--- a/heat/engine/resources/signal_responder.py
+++ b/heat/engine/resources/signal_responder.py
@@ -42,12 +42,13 @@ class SignalResponder(stack_user.StackUser):
def handle_delete(self):
super(SignalResponder, self).handle_delete()
- self._delete_signed_url()
+ self._delete_ec2_signed_url()
+ self._delete_heat_signal_url()
- def _delete_signed_url(self):
+ def _delete_ec2_signed_url(self):
self.data_delete('ec2_signed_url')
- def _get_signed_url(self, signal_type=SIGNAL):
+ def _get_ec2_signed_url(self, signal_type=SIGNAL):
"""Create properly formatted and pre-signed URL.
This uses the created user for the credentials.
@@ -105,8 +106,11 @@ class SignalResponder(stack_user.StackUser):
self.data_set('ec2_signed_url', url)
return url
- def _get_signal_url(self):
- stored = self.data().get('signal_url')
+ def _delete_heat_signal_url(self):
+ self.data_delete('heat_signal_url')
+
+ def _get_heat_signal_url(self):
+ stored = self.data().get('heat_signal_url')
if stored is not None:
return stored
@@ -117,5 +121,5 @@ class SignalResponder(stack_user.StackUser):
url = urlparse.urlunsplit(
(host_url.scheme, host_url.netloc, 'v1/%s/signal' % path, '', ''))
- self.data_set('signal_url', url)
+ self.data_set('heat_signal_url', url)
return url
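To summarise the rename above: the EC2-pre-signed URL and the native heat-API signal URL are now cached under distinct resource-data keys ('ec2_signed_url' and 'heat_signal_url'), each with its own delete helper, and handle_delete() clears both. A simplified sketch of that bookkeeping, with a plain dict standing in for resource data and placeholder URLs:

class SignalUrls(object):
    """Stand-in for SignalResponder data handling after the rename."""

    def __init__(self):
        self.data = {}            # stands in for resource data

    def get_ec2_signed_url(self):
        return self.data.setdefault(
            'ec2_signed_url', 'https://heat.example/ec2-signed-url')

    def get_heat_signal_url(self):
        return self.data.setdefault(
            'heat_signal_url', 'https://heat.example/v1/stack-path/signal')

    def handle_delete(self):
        # both cached URLs are removed on delete, as in the hunk above
        self.data.pop('ec2_signed_url', None)
        self.data.pop('heat_signal_url', None)


urls = SignalUrls()
urls.get_ec2_signed_url()
urls.get_heat_signal_url()
urls.handle_delete()
assert urls.data == {}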
diff --git a/heat/engine/resources/template_resource.py b/heat/engine/resources/template_resource.py
index 0f633cdeb..e2c721e1a 100644
--- a/heat/engine/resources/template_resource.py
+++ b/heat/engine/resources/template_resource.py
@@ -73,6 +73,7 @@ class TemplateResource(stack_resource.StackResource):
else:
self.template_name = tri.template_name
self.resource_type = tri.name
+ self.resource_path = tri.path
if tri.user_resource:
self.allowed_schemes = ('http', 'https')
else:
@@ -192,7 +193,8 @@ class TemplateResource(stack_resource.StackResource):
if t_data is not None:
self.stack.t.files[self.template_name] = t_data
self.stack.t.env.register_class(self.resource_type,
- self.template_name)
+ self.template_name,
+ path=self.resource_path)
return t_data
if reported_excp is None:
reported_excp = ValueError(_('Unknown error retrieving %s') %
diff --git a/heat/engine/service.py b/heat/engine/service.py
index cbb97a112..4b2c17489 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -267,7 +267,7 @@ class EngineService(service.Service):
by the RPC caller.
"""
- RPC_API_VERSION = '1.11'
+ RPC_API_VERSION = '1.12'
def __init__(self, host, topic, manager=None):
super(EngineService, self).__init__()
@@ -1023,7 +1023,7 @@ class EngineService(service.Service):
:param cnxt: RPC context.
"""
- return resources.global_env().get_types(support_status)
+ return resources.global_env().get_types(cnxt, support_status)
def list_template_versions(self, cnxt):
mgr = templatem._get_template_extension_manager()
@@ -1256,12 +1256,13 @@ class EngineService(service.Service):
if resource_name is None or name == resource_name]
@context.request_context
- def list_stack_resources(self, cnxt, stack_identity, nested_depth=0):
+ def list_stack_resources(self, cnxt, stack_identity,
+ nested_depth=0, with_detail=False):
s = self._get_stack(cnxt, stack_identity, show_deleted=True)
stack = parser.Stack.load(cnxt, stack=s)
depth = min(nested_depth, cfg.CONF.max_nested_stack_depth)
- return [api.format_stack_resource(resource, detail=False)
+ return [api.format_stack_resource(resource, detail=with_detail)
for resource in stack.iter_resources(depth)]
@context.request_context
diff --git a/heat/engine/stack.py b/heat/engine/stack.py
index 1838405b4..7facc45f1 100755
--- a/heat/engine/stack.py
+++ b/heat/engine/stack.py
@@ -847,9 +847,11 @@ class Stack(collections.Mapping):
@profiler.trace('Stack.check', hide_args=False)
def check(self):
self.updated_time = datetime.datetime.utcnow()
- checker = scheduler.TaskRunner(self.stack_task, self.CHECK,
- post_func=self.supports_check_action,
- aggregate_exceptions=True)
+ checker = scheduler.TaskRunner(
+ self.stack_task, self.CHECK,
+ post_func=self.supports_check_action,
+ error_wait_time=cfg.CONF.error_wait_time,
+ aggregate_exceptions=True)
checker()
def supports_check_action(self):
@@ -910,6 +912,7 @@ class Stack(collections.Mapping):
self.stack_task,
action=self.ADOPT,
reverse=False,
+ error_wait_time=cfg.CONF.error_wait_time,
post_func=rollback)
creator(timeout=self.timeout_secs())
@@ -1399,9 +1402,11 @@ class Stack(collections.Mapping):
return
self.updated_time = datetime.datetime.utcnow()
- sus_task = scheduler.TaskRunner(self.stack_task,
- action=self.SUSPEND,
- reverse=True)
+ sus_task = scheduler.TaskRunner(
+ self.stack_task,
+ action=self.SUSPEND,
+ reverse=True,
+ error_wait_time=cfg.CONF.error_wait_time)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.resume', hide_args=False)
@@ -1420,18 +1425,23 @@ class Stack(collections.Mapping):
return
self.updated_time = datetime.datetime.utcnow()
- sus_task = scheduler.TaskRunner(self.stack_task,
- action=self.RESUME,
- reverse=False)
+ sus_task = scheduler.TaskRunner(
+ self.stack_task,
+ action=self.RESUME,
+ reverse=False,
+ error_wait_time=cfg.CONF.error_wait_time)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.snapshot', hide_args=False)
def snapshot(self, save_snapshot_func):
'''Snapshot the stack, invoking handle_snapshot on all resources.'''
self.updated_time = datetime.datetime.utcnow()
- sus_task = scheduler.TaskRunner(self.stack_task, action=self.SNAPSHOT,
- reverse=False,
- pre_completion_func=save_snapshot_func)
+ sus_task = scheduler.TaskRunner(
+ self.stack_task,
+ action=self.SNAPSHOT,
+ reverse=False,
+ error_wait_time=cfg.CONF.error_wait_time,
+ pre_completion_func=save_snapshot_func)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.delete_snapshot', hide_args=False)
@@ -1605,9 +1615,10 @@ class Stack(collections.Mapping):
1. Delete previous raw template if stack completes successfully.
2. Deletes all sync points. They are no longer needed after stack
has completed/failed.
- 3. Delete the stack is the action is DELETE.
+ 3. Delete the stack if the action is DELETE.
'''
- if self.prev_raw_template_id is not None:
+ if (self.prev_raw_template_id is not None and
+ self.status != self.FAILED):
prev_tmpl_id = self.prev_raw_template_id
self.prev_raw_template_id = None
self.store()
diff --git a/heat/engine/worker.py b/heat/engine/worker.py
index 1103d2006..28b313d27 100644
--- a/heat/engine/worker.py
+++ b/heat/engine/worker.py
@@ -92,7 +92,7 @@ class WorkerService(service.Service):
resource_id)
if (rs_obj.engine_id != self.engine_id and
rs_obj.engine_id is not None):
- if not listener_client.EngineListnerClient(
+ if not listener_client.EngineListenerClient(
rs_obj.engine_id).is_alive(cnxt):
# steal the lock.
rs_obj.update_and_save({'engine_id': None})
@@ -178,8 +178,9 @@ class WorkerService(service.Service):
current_traversal,
data, is_update)
return
- except exception.ResourceFailure as e:
- reason = six.text_type(e)
+ except exception.ResourceFailure as ex:
+ reason = 'Resource %s failed: %s' % (stack.action,
+ six.text_type(ex))
self._handle_resource_failure(
cnxt, stack.id, current_traversal, reason)
return
@@ -195,13 +196,14 @@ class WorkerService(service.Service):
current_traversal,
data, is_update)
return
- except exception.ResourceFailure as e:
- reason = six.text_type(e)
+ except exception.ResourceFailure as ex:
+ reason = 'Resource %s failed: %s' % (stack.action,
+ six.text_type(ex))
self._handle_resource_failure(
cnxt, stack.id, current_traversal, reason)
return
- graph_key = (rsrc.id, is_update)
+ graph_key = (resource_id, is_update)
if graph_key not in graph and rsrc.replaces is not None:
# If we are a replacement, impersonate the replaced resource for
# the purposes of calculating whether subsequent resources are
@@ -219,7 +221,7 @@ class WorkerService(service.Service):
input_data if fwd else None, fwd)
check_stack_complete(cnxt, rsrc.stack, current_traversal,
- rsrc.id, deps, is_update)
+ resource_id, deps, is_update)
except sync_point.SyncPointNotFound:
# Reload the stack to determine the current traversal, and check
# the SyncPoint for the current node to determine if it is ready.
@@ -309,7 +311,7 @@ def check_resource_update(rsrc, template_id, data, engine_id):
resource.Resource.COMPLETE,
resource.Resource.FAILED
])):
- rsrc.create_convergence(template_id, data, engine_id)
+ rsrc.create_convergence(data, engine_id)
else:
rsrc.update_convergence(template_id, data, engine_id)
@@ -320,4 +322,4 @@ def check_resource_cleanup(rsrc, template_id, data, engine_id):
'''
if rsrc.current_template_id != template_id:
- rsrc.delete_convergence(template_id, data, engine_id)
+ rsrc.delete_convergence(engine_id)
diff --git a/heat/locale/de/LC_MESSAGES/heat-log-error.po b/heat/locale/de/LC_MESSAGES/heat-log-error.po
deleted file mode 100644
index 939081314..000000000
--- a/heat/locale/de/LC_MESSAGES/heat-log-error.po
+++ /dev/null
@@ -1,63 +0,0 @@
-# Translations template for heat.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the heat project.
-#
-# Translators:
-# Andreas Jaeger <jaegerandi@gmail.com>, 2014
-# Ettore Atalan <atalanttore@googlemail.com>, 2014
-msgid ""
-msgstr ""
-"Project-Id-Version: Heat\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-30 06:05+0000\n"
-"PO-Revision-Date: 2015-05-29 10:46+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: German (http://www.transifex.com/projects/p/heat/language/"
-"de/)\n"
-"Language: de\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 1.3\n"
-"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-
-#, python-format
-msgid "DB error %s"
-msgstr "Datenbankfehler %s"
-
-#, python-format
-msgid "Exception handling resource: %s"
-msgstr "Ausnahmebehandlungsressource: %s"
-
-msgid "Exception in string format operation"
-msgstr "Ausnahme bei Zeichenfolgeformatoperation"
-
-#, python-format
-msgid "Exception: %s"
-msgstr "Ausnahmesituation: %s"
-
-msgid "Port not specified."
-msgstr "Port nicht angegeben."
-
-#, python-format
-msgid "Removing dead child %s"
-msgstr "Entfernen von inaktivem untergeordnetem Element %s"
-
-msgid "SIGHUP received"
-msgstr "SIGHUP empfangen"
-
-msgid "SIGTERM received"
-msgstr "SIGTERM erhalten"
-
-msgid "Unhandled exception"
-msgstr "Nicht behandelte Ausnahme"
-
-msgid "in dynamic looping call"
-msgstr "in dynamischen Schleifenaufruf"
-
-msgid "in fixed duration looping call"
-msgstr "in Schleifenaufruf mit festgelegter Dauer"
-
-#, python-format
-msgid "signal %(name)s : %(msg)s"
-msgstr "Signal %(name)s : %(msg)s"
diff --git a/heat/locale/es/LC_MESSAGES/heat-log-error.po b/heat/locale/es/LC_MESSAGES/heat-log-error.po
index eba7dccd6..4fb154dea 100644
--- a/heat/locale/es/LC_MESSAGES/heat-log-error.po
+++ b/heat/locale/es/LC_MESSAGES/heat-log-error.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-22 06:02+0000\n"
-"PO-Revision-Date: 2015-06-17 20:52+0000\n"
+"POT-Creation-Date: 2015-07-08 06:02+0000\n"
+"PO-Revision-Date: 2015-07-07 07:51+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Spanish (http://www.transifex.com/projects/p/heat/language/"
"es/)\n"
@@ -21,10 +21,6 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#, python-format
-msgid "%(opname) %(ci)s failed for %(a)s on %(sid)s"
-msgstr "%(opname) %(ci)s fallo por %(a)s en %(sid)s"
-
-#, python-format
msgid "DB error %s"
msgstr "Error de base de datos %s"
@@ -33,15 +29,6 @@ msgstr ""
"Ec2Token autorización ha fallado, ningún auth_uri especificado en el archivo "
"de configuración"
-msgid "Error stopping thread."
-msgstr "Error al detener la línea."
-
-msgid "Error stopping timer."
-msgstr "Error al detener temporizador."
-
-msgid "Error waiting on ThreadGroup."
-msgstr "Error al esperar en ThreadGroup."
-
#, python-format
msgid "Exception handling resource: %s"
msgstr "Excepción al manejar recurso: %s"
@@ -58,8 +45,10 @@ msgid "Failed to read %s"
msgstr "Ha fallado leer %s"
#, python-format
-msgid "Removing dead child %s"
-msgstr "Eliminando hijo muerto %s"
+msgid "Not respawning child %d, cannot recover from termination"
+msgstr ""
+"No se va a volver a generar el hijo %d, no se puede recuperar de la "
+"terminación"
#, python-format
msgid "Request does not contain %s parameter!"
@@ -83,9 +72,6 @@ msgstr "No ha sido posible obtener la pila %s para tareas periódicas"
msgid "Unexpected error occurred serving API: %s"
msgstr "Error inesperado ha ocurrido sirviendo API: %s"
-msgid "Unhandled exception"
-msgstr "Excepción no controlada"
-
msgid "failed to get lifecycle plug point classes"
msgstr "fallo a obtener clases de punto de conexión de ciclo de vida "
@@ -98,12 +84,6 @@ msgstr ""
"ha ocurrido un error al ordenar las clases del punto de conexión de ciclo de "
"vida"
-msgid "in dynamic looping call"
-msgstr "en llamada en bucle dinámica"
-
-msgid "in fixed duration looping call"
-msgstr "en llamada en bucle de duración fija"
-
#, python-format
msgid "signal %(name)s : %(msg)s"
msgstr "señal %(name)s : %(msg)s"
diff --git a/heat/locale/es/LC_MESSAGES/heat-log-info.po b/heat/locale/es/LC_MESSAGES/heat-log-info.po
index ed2750147..129031de9 100644
--- a/heat/locale/es/LC_MESSAGES/heat-log-info.po
+++ b/heat/locale/es/LC_MESSAGES/heat-log-info.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-17 06:06+0000\n"
-"PO-Revision-Date: 2015-06-10 21:55+0000\n"
+"POT-Creation-Date: 2015-07-08 06:01+0000\n"
+"PO-Revision-Date: 2015-07-07 07:51+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Spanish (http://www.transifex.com/projects/p/heat/language/"
"es/)\n"
@@ -29,18 +29,13 @@ msgstr "Autenticación AWS correcta."
msgid "AWS credentials found, checking against keystone."
msgstr "Credenciales AWS encontradas, comprobándolas contra keystone."
+msgid "All workers have terminated. Exiting"
+msgstr "Todos los trabajadores han terminado. Saliendo"
+
#, python-format
msgid "Authenticating with %s"
msgstr "Autenticándo con %s"
-#, python-format
-msgid "Caught %s, exiting"
-msgstr "Se ha captado %s, saliendo"
-
-#, python-format
-msgid "Caught %s, stopping children"
-msgstr "Se ha captado %s, deteniendo hijos"
-
msgid "Caught keyboard interrupt. Exiting."
msgstr "Se ha generado interrupción de teclado. Saliendo."
@@ -52,32 +47,13 @@ msgid "Checking AWS credentials.."
msgstr "Comprobando credenciales AWS.."
#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr "Hijo %(pid)d matado por señal %(sig)d"
-
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr "El hijo %(pid)s ha salido con el estado %(code)d"
-
-#, python-format
msgid "Child %d exiting normally"
msgstr "El hijo %d está saliendo de forma normal"
#, python-format
-msgid "Child caught %s, exiting"
-msgstr "Hijo captado %s, saliendo"
-
-#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d"
-
-#, python-format
msgid "Fetching data from %s"
msgstr "Obteniendo datos desde %s"
-msgid "Forking too fast, sleeping"
-msgstr "Bifurcación demasiado rápida, en reposo"
-
#, python-format
msgid "Loading %s"
msgstr "Cargando %s"
@@ -88,20 +64,21 @@ msgstr "Firma AWS Signature no encontrada."
msgid "No AWSAccessKeyId/Authorization Credential"
msgstr "Credencial AWSAccessKeyId/Authorization no encontrada"
-msgid "Parent process has died unexpectedly, exiting"
-msgstr "El proceso padre se ha detenido inesperadamente, saliendo"
-
#, python-format
msgid "Registering %(path)s -> %(value)s"
msgstr "Registrando %(path)s -> %(value)s"
#, python-format
-msgid "Stack %s processing was finished"
-msgstr "Pila %s procesando ha finalizado"
+msgid "Removed dead child %s"
+msgstr "Se ha eliminado hijo muerto %s"
+
+#, python-format
+msgid "Removed stale child %s"
+msgstr "Se ha eliminado hijo obsoleto %s"
#, python-format
-msgid "Started child %d"
-msgstr "Se ha iniciado el hijo %d"
+msgid "Stack %s processing was finished"
+msgstr "Pila %s procesando ha finalizado"
#, python-format
msgid "Started child %s"
@@ -114,13 +91,6 @@ msgstr "Iniciando %d trabajadores"
msgid "Starting single process server"
msgstr "Iniciando servidor de proceso individual"
-msgid "Wait called after thread killed. Cleaning up."
-msgstr "Esperar llamado después de cortar la línea. Limpiando."
-
-#, python-format
-msgid "Waiting on %d children to exit"
-msgstr "En espera de %d hijos para salir"
-
#, python-format
msgid "creating %s"
msgstr "creando %s"
diff --git a/heat/locale/fr/LC_MESSAGES/heat-log-error.po b/heat/locale/fr/LC_MESSAGES/heat-log-error.po
index 5beb231c4..f44972a6e 100644
--- a/heat/locale/fr/LC_MESSAGES/heat-log-error.po
+++ b/heat/locale/fr/LC_MESSAGES/heat-log-error.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-30 06:05+0000\n"
-"PO-Revision-Date: 2015-05-29 10:46+0000\n"
+"POT-Creation-Date: 2015-07-08 06:02+0000\n"
+"PO-Revision-Date: 2015-07-07 07:51+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: French (http://www.transifex.com/projects/p/heat/language/"
"fr/)\n"
@@ -21,8 +21,8 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
#, python-format
-msgid "%(opname) %(ci)s failed for %(a)s on %(sid)s"
-msgstr "%(opname) %(ci)s échec pour %(a)s sur %(sid)s"
+msgid "%(name)s: %(value)s"
+msgstr "%(name)s: %(value)s"
#, python-format
msgid "DB error %s"
@@ -82,14 +82,14 @@ msgstr ""
msgid "Metadata failed validation for %s"
msgstr "Echec de validation de metadata pour %s"
+#, python-format
+msgid "Not respawning child %d, cannot recover from termination"
+msgstr "Aucune relance de l'enfant %d, récupération impossible après arrêt"
+
msgid "Port not specified."
msgstr "Port non spécifié"
#, python-format
-msgid "Removing dead child %s"
-msgstr "Retrait de l'enfant arrêté %s"
-
-#, python-format
msgid "Request does not contain %s parameter!"
msgstr "La requete ne doit pas contenir le paramètre %s!"
@@ -122,15 +122,6 @@ msgstr "Action inattendu %s à supprimer!"
msgid "Unexpected action %s passed to update!"
msgstr "Action inattendu %s à mettre à jour!"
-msgid "Unhandled exception"
-msgstr "Exception non gérée"
-
-msgid "in dynamic looping call"
-msgstr "dans l'appel en boucle dynamique"
-
-msgid "in fixed duration looping call"
-msgstr "dans l'appel en boucle de durée fixe"
-
#, python-format
msgid "signal %(name)s : %(msg)s"
msgstr "signal %(name)s : %(msg)s"
diff --git a/heat/locale/fr/LC_MESSAGES/heat-log-info.po b/heat/locale/fr/LC_MESSAGES/heat-log-info.po
index 17191e1c5..65f2072c2 100644
--- a/heat/locale/fr/LC_MESSAGES/heat-log-info.po
+++ b/heat/locale/fr/LC_MESSAGES/heat-log-info.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-30 06:05+0000\n"
-"PO-Revision-Date: 2015-05-29 10:46+0000\n"
+"POT-Creation-Date: 2015-07-08 06:01+0000\n"
+"PO-Revision-Date: 2015-07-07 07:51+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: French (http://www.transifex.com/projects/p/heat/language/"
"fr/)\n"
@@ -49,18 +49,13 @@ msgstr "Succès de l'authentification AWS"
msgid "AWS credentials found, checking against keystone."
msgstr "Information de compte AWS trouvé, vérification avec keystone."
+msgid "All workers have terminated. Exiting"
+msgstr "Tous les agents ont terminé. Quittez"
+
#, python-format
msgid "Authenticating with %s"
msgstr "Authentification avec %s"
-#, python-format
-msgid "Caught %s, exiting"
-msgstr "%s interceptée, sortie"
-
-#, python-format
-msgid "Caught %s, stopping children"
-msgstr "%s interceptée, arrêt de l'enfant"
-
msgid "Caught keyboard interrupt. Exiting."
msgstr "Interruption interceptée de clavier. Fermeture du programme en cours."
@@ -76,22 +71,10 @@ msgid "Checking stack %s"
msgstr "Vérification de la stack %s"
#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr "Enfant %(pid)d arrêté par le signal %(sig)d"
-
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr "Processus fils %(pid)s terminé avec le status %(code)d"
-
-#, python-format
msgid "Child %d exiting normally"
msgstr "Sortie normale de l'enfant %d"
#, python-format
-msgid "Child caught %s, exiting"
-msgstr "L'enfant a reçu %s, sortie"
-
-#, python-format
msgid "Creating stack %s"
msgstr "Création de la stack %s"
@@ -100,16 +83,9 @@ msgid "Deleting stack %s"
msgstr "Suppresion de la stack %s"
#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d"
-
-#, python-format
msgid "Fetching data from %s"
msgstr "Recherche de donnée depuis %s"
-msgid "Forking too fast, sleeping"
-msgstr "Bifurcation trop rapide, pause"
-
#, python-format
msgid "Loading %s"
msgstr "Chargement %s"
@@ -120,18 +96,11 @@ msgstr "Signature AWS non trouvé"
msgid "No AWSAccessKeyId/Authorization Credential"
msgstr "Pas AWSAccessKeyId/Authorisation de compte"
-msgid "Parent process has died unexpectedly, exiting"
-msgstr "Processus parent arrêté de manière inattendue, sortie"
-
#, python-format
msgid "Stack create failed, status %s"
msgstr "Echec de création de la stack, status %s"
#, python-format
-msgid "Started child %d"
-msgstr "Enfant démarré %d"
-
-#, python-format
msgid "Started child %s"
msgstr "Enfant démarré %s"
@@ -150,13 +119,6 @@ msgstr "Mise à jour de la stack %s"
msgid "Validating %s"
msgstr "Validation %s"
-msgid "Wait called after thread killed. Cleaning up."
-msgstr "Pause demandée après suppression de thread. Nettoyage."
-
-#, python-format
-msgid "Waiting on %d children to exit"
-msgstr "En attente %d enfants pour sortie"
-
#, python-format
msgid "abandoning stack %s"
msgstr "Abandon de la stack %s"
diff --git a/heat/locale/heat-log-error.pot b/heat/locale/heat-log-error.pot
index bb2f22471..343b160e3 100644
--- a/heat/locale/heat-log-error.pot
+++ b/heat/locale/heat-log-error.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: heat 2015.2.0.dev518\n"
+"Project-Id-Version: heat 5.0.0.0b2.dev164\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-17 06:06+0000\n"
+"POT-Creation-Date: 2015-07-08 06:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -38,6 +38,11 @@ msgstr ""
msgid "Exception in string format operation"
msgstr ""
+#: heat/common/exception.py:117
+#, python-format
+msgid "%(name)s: %(value)s"
+msgstr ""
+
#: heat/common/heat_keystoneclient.py:151
#: heat/common/heat_keystoneclient.py:231
msgid "Domain admin client authentication failed"
@@ -77,7 +82,7 @@ msgstr ""
#: heat/common/lifecycle_plugin_utils.py:111
#, python-format
-msgid "%(opname) %(ci)s failed for %(a)s on %(sid)s"
+msgid "%(opname)s %(ci)s failed for %(a)s on %(sid)s"
msgstr ""
#: heat/common/plugin_loader.py:100
@@ -85,30 +90,30 @@ msgstr ""
msgid "Failed to import module %s"
msgstr ""
-#: heat/common/wsgi.py:272
+#: heat/common/wsgi.py:291
msgid "SIGTERM received"
msgstr ""
-#: heat/common/wsgi.py:281
+#: heat/common/wsgi.py:301
msgid "SIGHUP received"
msgstr ""
-#: heat/common/wsgi.py:311
+#: heat/common/wsgi.py:440
#, python-format
-msgid "Removing dead child %s"
+msgid "Not respawning child %d, cannot recover from termination"
msgstr ""
-#: heat/common/wsgi.py:672
+#: heat/common/wsgi.py:855
#, python-format
msgid "Exception handling resource: %s"
msgstr ""
-#: heat/common/wsgi.py:694
+#: heat/common/wsgi.py:877
#, python-format
msgid "Returning %(code)s to user: %(explanation)s"
msgstr ""
-#: heat/common/wsgi.py:768
+#: heat/common/wsgi.py:951
#, python-format
msgid "Unexpected error occurred serving API: %s"
msgstr ""
@@ -121,17 +126,17 @@ msgstr ""
msgid "Unexpected number of keys in watch_data.data!"
msgstr ""
-#: heat/engine/environment.py:600
+#: heat/engine/environment.py:610
#, python-format
msgid "Failed to read %s"
msgstr ""
-#: heat/engine/environment.py:612
+#: heat/engine/environment.py:622
#, python-format
msgid "Failed to parse %(file_path)s"
msgstr ""
-#: heat/engine/environment.py:616
+#: heat/engine/environment.py:626
#, python-format
msgid "Failed to read %(file_path)s"
msgstr ""
@@ -146,16 +151,17 @@ msgstr ""
msgid "Invalid type for %(mapping_name)s from %(module)s"
msgstr ""
-#: heat/engine/resource.py:550
+#: heat/engine/resource.py:614
msgid "Error marking resource as failed"
msgstr ""
-#: heat/engine/resource.py:1106 heat/engine/resource.py:1151
+#: heat/engine/resource.py:1142 heat/engine/resource.py:1187
+#: heat/engine/resource.py:1205
#, python-format
msgid "DB error %s"
msgstr ""
-#: heat/engine/resource.py:1315
+#: heat/engine/resource.py:1393
#, python-format
msgid "signal %(name)s : %(msg)s"
msgstr ""
@@ -165,11 +171,11 @@ msgstr ""
msgid "Failed to stop engine service, %s"
msgstr ""
-#: heat/engine/service.py:1448
+#: heat/engine/service.py:1456
msgid "Filtering by namespace/metric not yet supported"
msgstr ""
-#: heat/engine/service.py:1588
+#: heat/engine/service.py:1605
#, python-format
msgid "Service %(service_id)s update failed: %(error)s"
msgstr ""
@@ -184,38 +190,37 @@ msgstr ""
msgid "Exception: %s"
msgstr ""
-#: heat/engine/stack.py:1090
+#: heat/engine/stack.py:1076
#, python-format
msgid "Unexpected action %s passed to update!"
msgstr ""
-#: heat/engine/stack.py:1327
+#: heat/engine/stack.py:1313
#, python-format
msgid "Unexpected action %s passed to delete!"
msgstr ""
-#: heat/engine/stack.py:1518
+#: heat/engine/stack.py:1504
#, python-format
msgid "Resource %(name)s delete failed: %(ex)s"
msgstr ""
-#: heat/engine/stack.py:1527
+#: heat/engine/stack.py:1513
#, python-format
msgid "Resource %(name)s create failed: %(ex)s"
msgstr ""
-#: heat/engine/worker.py:82
+#: heat/engine/worker.py:86
#, python-format
msgid "WorkerService is failed to stop, %s"
msgstr ""
-#: heat/engine/clients/__init__.py:93
-#, python-format
-msgid "Invalid cloud_backend setting in heat.conf detected - %s"
+#: heat/engine/clients/os/zaqar.py:39
+msgid "Zaqar connection failed, no auth_token!"
msgstr ""
-#: heat/engine/clients/os/zaqar.py:36
-msgid "Zaqar connection failed, no auth_token!"
+#: heat/engine/resources/stack_resource.py:439
+msgid "update_stack"
msgstr ""
#: heat/engine/resources/wait_condition.py:73
@@ -223,7 +228,7 @@ msgstr ""
msgid "Metadata failed validation for %s"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:347
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:348
msgid "Failed sending error notification"
msgstr ""
@@ -238,38 +243,14 @@ msgstr ""
msgid "Port not specified."
msgstr ""
-#: heat/engine/resources/openstack/heat/remote_stack.py:152
+#: heat/engine/resources/openstack/heat/remote_stack.py:153
#, python-format
msgid "exception: %s"
msgstr ""
-#: heat/engine/resources/openstack/nova/nova_floatingip.py:77
+#: heat/engine/resources/openstack/nova/nova_floatingip.py:79
msgid ""
"Could not allocate floating IP. Probably there is no default floating IP "
"pool is configured."
msgstr ""
-#: heat/openstack/common/loopingcall.py:95
-msgid "in fixed duration looping call"
-msgstr ""
-
-#: heat/openstack/common/loopingcall.py:138
-msgid "in dynamic looping call"
-msgstr ""
-
-#: heat/openstack/common/service.py:266
-msgid "Unhandled exception"
-msgstr ""
-
-#: heat/openstack/common/threadgroup.py:103
-msgid "Error stopping thread."
-msgstr ""
-
-#: heat/openstack/common/threadgroup.py:110
-msgid "Error stopping timer."
-msgstr ""
-
-#: heat/openstack/common/threadgroup.py:137
-msgid "Error waiting on ThreadGroup."
-msgstr ""
-
diff --git a/heat/locale/heat-log-info.pot b/heat/locale/heat-log-info.pot
index 335db9848..1adfdb14a 100644
--- a/heat/locale/heat-log-info.pot
+++ b/heat/locale/heat-log-info.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: heat 2015.2.0.dev7\n"
+"Project-Id-Version: heat 5.0.0.0b2.dev164\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-22 06:02+0000\n"
+"POT-Creation-Date: 2015-07-08 06:01+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -46,7 +46,7 @@ msgstr ""
msgid "AWS authentication failure."
msgstr ""
-#: heat/api/cfn/v1/stacks.py:440 heat/engine/service.py:849
+#: heat/api/cfn/v1/stacks.py:440 heat/engine/service.py:851
msgid "validate_template"
msgstr ""
@@ -60,85 +60,104 @@ msgstr ""
msgid "Fetching data from %s"
msgstr ""
-#: heat/common/wsgi.py:300 heat/openstack/common/service.py:328
+#: heat/common/wsgi.py:326
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: heat/common/wsgi.py:318
+#: heat/common/wsgi.py:344
msgid "Caught keyboard interrupt. Exiting."
msgstr ""
-#: heat/common/wsgi.py:341
+#: heat/common/wsgi.py:429
+#, python-format
+msgid "Removed dead child %s"
+msgstr ""
+
+#: heat/common/wsgi.py:432
+#, python-format
+msgid "Removed stale child %s"
+msgstr ""
+
+#: heat/common/wsgi.py:444
+msgid "All workers have terminated. Exiting"
+msgstr ""
+
+#: heat/common/wsgi.py:522
#, python-format
msgid "Child %d exiting normally"
msgstr ""
-#: heat/common/wsgi.py:344
+#: heat/common/wsgi.py:527
#, python-format
msgid "Started child %s"
msgstr ""
-#: heat/common/wsgi.py:371
+#: heat/common/wsgi.py:554
msgid "Starting single process server"
msgstr ""
-#: heat/engine/environment.py:262
+#: heat/engine/environment.py:264
#, python-format
msgid "Registering %(path)s -> %(value)s"
msgstr ""
-#: heat/engine/environment.py:607
+#: heat/engine/environment.py:617
#, python-format
msgid "Loading %s"
msgstr ""
-#: heat/engine/resource.py:336
+#: heat/engine/resource.py:356
#, python-format
msgid "Reached hook on %s"
msgstr ""
-#: heat/engine/resource.py:671
+#: heat/engine/resource.py:602
+#, python-format
+msgid "%(action)s: %(info)s"
+msgstr ""
+
+#: heat/engine/resource.py:713
#, python-format
msgid "creating %s"
msgstr ""
-#: heat/engine/resource.py:852
+#: heat/engine/resource.py:888
#, python-format
msgid "updating %s"
msgstr ""
-#: heat/engine/resource.py:877
+#: heat/engine/resource.py:913
#, python-format
msgid "Checking %s"
msgstr ""
-#: heat/engine/resource.py:914
+#: heat/engine/resource.py:950
#, python-format
msgid "suspending %s"
msgstr ""
-#: heat/engine/resource.py:930
+#: heat/engine/resource.py:966
#, python-format
msgid "resuming %s"
msgstr ""
-#: heat/engine/resource.py:935
+#: heat/engine/resource.py:971
#, python-format
msgid "snapshotting %s"
msgstr ""
-#: heat/engine/resource.py:981
+#: heat/engine/resource.py:1017
#, python-format
msgid "Validating %s"
msgstr ""
-#: heat/engine/resource.py:1050
+#: heat/engine/resource.py:1070
#, python-format
msgid "deleting %s"
msgstr ""
-#: heat/engine/resource.py:1311
+#: heat/engine/resource.py:1373
#, python-format
msgid "Clearing %(hook)s hook on %(resource)s"
msgstr ""
@@ -148,103 +167,103 @@ msgstr ""
msgid "%s timed out"
msgstr ""
-#: heat/engine/service.py:366
+#: heat/engine/service.py:367
msgid "Engine service is stopped successfully"
msgstr ""
-#: heat/engine/service.py:376
+#: heat/engine/service.py:377
#, python-format
msgid "WorkerService is stopped in engine %s"
msgstr ""
-#: heat/engine/service.py:384
+#: heat/engine/service.py:385
#, python-format
msgid "Waiting stack %s processing to be finished"
msgstr ""
-#: heat/engine/service.py:388
+#: heat/engine/service.py:389
#, python-format
msgid "Stack %s processing was finished"
msgstr ""
-#: heat/engine/service.py:393
+#: heat/engine/service.py:394
#, python-format
msgid "Service %s is deleted"
msgstr ""
-#: heat/engine/service.py:396
+#: heat/engine/service.py:397
msgid "All threads were gone, terminating engine"
msgstr ""
-#: heat/engine/service.py:637
+#: heat/engine/service.py:638
#, python-format
msgid "previewing stack %s"
msgstr ""
-#: heat/engine/service.py:678
+#: heat/engine/service.py:679
#, python-format
msgid "Creating stack %s"
msgstr ""
-#: heat/engine/service.py:702
+#: heat/engine/service.py:703
#, python-format
msgid "Stack create failed, status %s"
msgstr ""
-#: heat/engine/service.py:742
+#: heat/engine/service.py:743
#, python-format
msgid "Updating stack %s"
msgstr ""
-#: heat/engine/service.py:815
+#: heat/engine/service.py:817
#, python-format
msgid "Starting cancel of updating stack %s"
msgstr ""
-#: heat/engine/service.py:948
+#: heat/engine/service.py:950
#, python-format
msgid "Deleting stack %s"
msgstr ""
-#: heat/engine/service.py:1006
+#: heat/engine/service.py:1008
#, python-format
msgid "abandoning stack %s"
msgstr ""
-#: heat/engine/service.py:1303
+#: heat/engine/service.py:1317
#, python-format
msgid "%(stack)s is in state %(action)s_IN_PROGRESS, snapshot is not permitted."
msgstr ""
-#: heat/engine/service.py:1353
+#: heat/engine/service.py:1367
#, python-format
msgid "Checking stack %s"
msgstr ""
-#: heat/engine/service.py:1573
+#: heat/engine/service.py:1596
#, python-format
msgid "Service %s is started"
msgstr ""
-#: heat/engine/service.py:1580
+#: heat/engine/service.py:1603
#, python-format
msgid "Service %s is updated"
msgstr ""
-#: heat/engine/service.py:1601
+#: heat/engine/service.py:1624
#, python-format
msgid "Service %s was aborted"
msgstr ""
-#: heat/engine/service.py:1623
+#: heat/engine/service.py:1646
#, python-format
msgid ""
"Engine %(engine)s went down when stack %(stack_id)s was in action "
"%(action)s"
msgstr ""
-#: heat/engine/service_software_config.py:104
-#: heat/engine/service_software_config.py:126
+#: heat/engine/service_software_config.py:123
+#: heat/engine/service_software_config.py:145
#, python-format
msgid "Signal object not found: %(c)s %(o)s"
msgstr ""
@@ -254,43 +273,48 @@ msgstr ""
msgid "Stack %(action)s %(status)s (%(name)s): %(reason)s"
msgstr ""
-#: heat/engine/stack.py:962
+#: heat/engine/stack.py:977
#, python-format
msgid "convergence_dependencies: %s"
msgstr ""
-#: heat/engine/stack.py:981
+#: heat/engine/stack.py:996
#, python-format
msgid "Triggering resource %(rsrc_id)s for %(is_update)s update"
msgstr ""
-#: heat/engine/stack.py:1261
+#: heat/engine/stack.py:1275
#, python-format
msgid ""
"Tried to delete user_creds that do not exist (stack=%(stack)s "
"user_creds_id=%(uc)s)"
msgstr ""
-#: heat/engine/stack.py:1269
+#: heat/engine/stack.py:1283
#, python-format
msgid "Tried to store a stack that does not exist %s"
msgstr ""
-#: heat/engine/stack.py:1360 heat/engine/stack.py:1372
+#: heat/engine/stack.py:1374 heat/engine/stack.py:1386
#, python-format
msgid "Tried to delete stack that does not exist %s "
msgstr ""
-#: heat/engine/stack.py:1388
+#: heat/engine/stack.py:1402
#, python-format
msgid "%s is already suspended"
msgstr ""
-#: heat/engine/stack.py:1409
+#: heat/engine/stack.py:1423
#, python-format
msgid "%s is already resumed"
msgstr ""
+#: heat/engine/stack.py:1599
+#, python-format
+msgid "[%(name)s(%(id)s)] update traversal %(tid)s complete"
+msgstr ""
+
#: heat/engine/stack_lock.py:82
#, python-format
msgid ""
@@ -342,14 +366,19 @@ msgstr ""
msgid "no action for new state %s"
msgstr ""
-#: heat/engine/worker.py:66
+#: heat/engine/worker.py:70
msgid "Starting WorkerService ..."
msgstr ""
-#: heat/engine/worker.py:77
+#: heat/engine/worker.py:81
msgid "Stopping WorkerService ..."
msgstr ""
+#: heat/engine/worker.py:103
+#, python-format
+msgid "Triggering rollback of %(stack_name)s %(action)s "
+msgstr ""
+
#: heat/engine/clients/os/cinder.py:65
#, python-format
msgid "Creating Cinder client with volume API version %d."
@@ -380,7 +409,7 @@ msgstr ""
msgid "Multiple images %s were found in glance with name"
msgstr ""
-#: heat/engine/clients/os/nova.py:606
+#: heat/engine/clients/os/nova.py:616
#, python-format
msgid "Volume %(vol)s is detached from server %(srv)s"
msgstr ""
@@ -461,7 +490,7 @@ msgstr ""
msgid "%(name)s NOT performing scaling action, cooldown %(cooldown)s"
msgstr ""
-#: heat/engine/resources/openstack/heat/scaling_policy.py:179
+#: heat/engine/resources/openstack/heat/scaling_policy.py:180
#, python-format
msgid ""
"%(name)s Alarm, adjusting Group %(group)s with id %(asgn_id)s by "
@@ -485,7 +514,11 @@ msgstr ""
msgid "%(name)s Timed out (%(timeout)s)"
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1271
+#: heat/engine/resources/openstack/manila/share.py:231
+msgid "Applying access rules to created Share."
+msgstr ""
+
+#: heat/engine/resources/openstack/nova/server.py:1291
#, python-format
msgid ""
"For the server \"%(server)s\" the \"%(uuid)s\" property is set to network"
@@ -552,55 +585,3 @@ msgstr ""
msgid "Starting Heat ReST API on %(host)s:%(port)s"
msgstr ""
-#: heat/openstack/common/eventlet_backdoor.py:146
-#, python-format
-msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
-msgstr ""
-
-#: heat/openstack/common/service.py:166
-#, python-format
-msgid "Caught %s, exiting"
-msgstr ""
-
-#: heat/openstack/common/service.py:232
-msgid "Parent process has died unexpectedly, exiting"
-msgstr ""
-
-#: heat/openstack/common/service.py:260
-#, python-format
-msgid "Child caught %s, exiting"
-msgstr ""
-
-#: heat/openstack/common/service.py:299
-msgid "Forking too fast, sleeping"
-msgstr ""
-
-#: heat/openstack/common/service.py:318
-#, python-format
-msgid "Started child %d"
-msgstr ""
-
-#: heat/openstack/common/service.py:345
-#, python-format
-msgid "Child %(pid)d killed by signal %(sig)d"
-msgstr ""
-
-#: heat/openstack/common/service.py:349
-#, python-format
-msgid "Child %(pid)s exited with status %(code)d"
-msgstr ""
-
-#: heat/openstack/common/service.py:388
-#, python-format
-msgid "Caught %s, stopping children"
-msgstr ""
-
-#: heat/openstack/common/service.py:403
-msgid "Wait called after thread killed. Cleaning up."
-msgstr ""
-
-#: heat/openstack/common/service.py:419
-#, python-format
-msgid "Waiting on %d children to exit"
-msgstr ""
-
diff --git a/heat/locale/heat-log-warning.pot b/heat/locale/heat-log-warning.pot
index 915ccb45c..c8ed2dcb2 100644
--- a/heat/locale/heat-log-warning.pot
+++ b/heat/locale/heat-log-warning.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: heat 2015.2.0.dev518\n"
+"Project-Id-Version: heat 5.0.0.0b2.dev164\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-17 06:06+0000\n"
+"POT-Creation-Date: 2015-07-08 06:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -33,6 +33,10 @@ msgid ""
"falling back to using default"
msgstr ""
+#: heat/common/config.py:331
+msgid "Please update auth_encryption_key to be 32 characters."
+msgstr ""
+
#: heat/common/heat_keystoneclient.py:269
#, python-format
msgid "Truncating the username %s to the last 64 characters."
@@ -60,46 +64,63 @@ msgid ""
"enabled=false"
msgstr ""
-#: heat/common/wsgi.py:733
+#: heat/common/wsgi.py:434
+#, python-format
+msgid "Unrecognised child %s"
+msgstr ""
+
+#: heat/common/wsgi.py:916
msgid "Unable to serialize exception response"
msgstr ""
-#: heat/engine/environment.py:240 heat/engine/environment.py:246
+#: heat/engine/attributes.py:176 heat/engine/attributes.py:182
+#: heat/engine/attributes.py:187 heat/engine/attributes.py:192
+#: heat/engine/attributes.py:199
+#, python-format
+msgid "Attribute %(name)s is not of type %(att_type)s"
+msgstr ""
+
+#: heat/engine/environment.py:242 heat/engine/environment.py:248
#, python-format
msgid "Removing %(item)s from %(path)s"
msgstr ""
-#: heat/engine/environment.py:259
+#: heat/engine/environment.py:261
#, python-format
msgid "Changing %(path)s from %(was)s to %(now)s"
msgstr ""
-#: heat/engine/resource.py:1075
+#: heat/engine/resource.py:1111
#, python-format
msgid "db error %s"
msgstr ""
-#: heat/engine/resource.py:1156
+#: heat/engine/resource.py:1192
#, python-format
msgid "Resource \"%s\" not pre-stored in DB"
msgstr ""
-#: heat/engine/resource.py:1329
+#: heat/engine/resource.py:1235
+#, python-format
+msgid "Failed to unlock resource %s"
+msgstr ""
+
+#: heat/engine/resource.py:1407
#, python-format
msgid "Resource %s does not implement metadata update"
msgstr ""
-#: heat/engine/service.py:1169
+#: heat/engine/service.py:1177
#, python-format
msgid "Access denied to resource %s"
msgstr ""
-#: heat/engine/service.py:1425
+#: heat/engine/service.py:1433
#, python-format
msgid "show_watch (all) db error %s"
msgstr ""
-#: heat/engine/service.py:1454
+#: heat/engine/service.py:1462
#, python-format
msgid "show_metric (all) db error %s"
msgstr ""
@@ -113,7 +134,7 @@ msgstr ""
msgid "Unable to set parameters StackId identifier"
msgstr ""
-#: heat/engine/stack_lock.py:131
+#: heat/engine/stack_lock.py:119
#, python-format
msgid "Lock was already released on stack %s!"
msgstr ""
@@ -138,49 +159,49 @@ msgstr ""
msgid "Requested client \"%s\" not found"
msgstr ""
-#: heat/engine/clients/os/nova.py:117
+#: heat/engine/clients/os/nova.py:125
#, python-format
msgid "Server (%(server)s) not found: %(ex)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:133
+#: heat/engine/clients/os/nova.py:141
#, python-format
msgid ""
"Received an OverLimit response when fetching server (%(id)s) : "
"%(exception)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:140
+#: heat/engine/clients/os/nova.py:148
#, python-format
msgid ""
"Received the following exception when fetching server (%(id)s) : "
"%(exception)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:156
+#: heat/engine/clients/os/nova.py:164
#, python-format
msgid ""
"Server %(name)s (%(id)s) received an OverLimit response during "
"server.get(): %(exception)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:164
+#: heat/engine/clients/os/nova.py:172
#, python-format
msgid ""
"Server \"%(name)s\" (%(id)s) received the following exception during "
"server.get(): %(exception)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:486
+#: heat/engine/clients/os/nova.py:502
#, python-format
msgid "Instance (%(server)s) not found: %(ex)s"
msgstr ""
-#: heat/engine/resources/signal_responder.py:67
+#: heat/engine/resources/signal_responder.py:68
msgid "Cannot generate signed url, no stored access/secret key"
msgstr ""
-#: heat/engine/resources/stack_resource.py:160
+#: heat/engine/resources/stack_resource.py:170
#, python-format
msgid "Preview of '%s' not yet implemented"
msgstr ""
@@ -198,66 +219,56 @@ msgstr ""
msgid "Skipping association, resource not specified"
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:371
+#: heat/engine/resources/openstack/neutron/port.py:388
#, python-format
msgid "Failed to fetch resource attributes: %s"
msgstr ""
-#: heat/engine/resources/openstack/swift/swift.py:235
+#: heat/engine/resources/openstack/swift/swift.py:241
#, python-format
msgid "Head container failed: %s"
msgstr ""
-#: heat/engine/resources/openstack/trove/os_database.py:369
+#: heat/engine/resources/openstack/trove/os_database.py:371
#, python-format
msgid ""
"Stack %(name)s (%(id)s) received an OverLimit response during "
"instance.get(): %(exception)s"
msgstr ""
-#: heat/engine/resources/openstack/trove/trove_cluster.py:164
+#: heat/engine/resources/openstack/trove/trove_cluster.py:166
#, python-format
msgid ""
"Stack %(name)s (%(id)s) received an OverLimit response during "
"clusters.get(): %(exception)s"
msgstr ""
-#: heat/openstack/common/loopingcall.py:87
-#, python-format
-msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
-msgstr ""
-
-#: heat/openstack/common/service.py:353
-#, python-format
-msgid "pid %d not in child list"
-msgstr ""
-
-#: heat/tests/generic_resource.py:37
+#: heat/tests/generic_resource.py:41
#, python-format
msgid "Creating generic resource (Type \"%s\")"
msgstr ""
-#: heat/tests/generic_resource.py:41
+#: heat/tests/generic_resource.py:45
#, python-format
msgid "Updating generic resource (Type \"%s\")"
msgstr ""
-#: heat/tests/generic_resource.py:45
+#: heat/tests/generic_resource.py:49
#, python-format
msgid "Deleting generic resource (Type \"%s\")"
msgstr ""
-#: heat/tests/generic_resource.py:52
+#: heat/tests/generic_resource.py:56
#, python-format
msgid "Suspending generic resource (Type \"%s\")"
msgstr ""
-#: heat/tests/generic_resource.py:56
+#: heat/tests/generic_resource.py:60
#, python-format
msgid "Resuming generic resource (Type \"%s\")"
msgstr ""
-#: heat/tests/generic_resource.py:143
+#: heat/tests/generic_resource.py:147
#, python-format
msgid "Signaled resource (Type \"%(type)s\") %(details)s"
msgstr ""
diff --git a/heat/locale/heat.pot b/heat/locale/heat.pot
index ee7808874..20320c214 100644
--- a/heat/locale/heat.pot
+++ b/heat/locale/heat.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: heat 2015.2.0.dev7\n"
+"Project-Id-Version: heat 5.0.0.0b2.dev164\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-06-22 06:02+0000\n"
+"POT-Creation-Date: 2015-07-08 06:01+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -237,7 +237,7 @@ msgstr ""
msgid "No template specified"
msgstr ""
-#: heat/api/openstack/v1/stacks.py:538
+#: heat/api/openstack/v1/stacks.py:548
#, python-format
msgid "Template type is not supported: %s"
msgstr ""
@@ -553,25 +553,25 @@ msgid ""
"\"stack_domain_admin_password\""
msgstr ""
-#: heat/common/config.py:330
-msgid "heat.conf misconfigured, auth_encryption_key length must be 16, 24 or 32"
+#: heat/common/config.py:333
+msgid "heat.conf misconfigured, auth_encryption_key must be 32 characters"
msgstr ""
-#: heat/common/config.py:407
+#: heat/common/config.py:410
msgid "Unable to locate config file"
msgstr ""
-#: heat/common/config.py:419
+#: heat/common/config.py:422
#, python-format
msgid ""
"Unable to load %(app_name)s from configuration file %(conf_file)s.\n"
"Got: %(e)r"
msgstr ""
-#: heat/common/crypt.py:27
+#: heat/common/crypt.py:29
msgid ""
"Key used to encrypt authentication info in the database. Length of this "
-"key must be 16, 24 or 32 characters."
+"key must be 32 characters."
msgstr ""
#: heat/common/custom_backend_auth.py:60
@@ -591,308 +591,314 @@ msgstr ""
msgid "An unknown exception occurred."
msgstr ""
-#: heat/common/exception.py:133
+#: heat/common/exception.py:134
#, python-format
msgid "Missing required credential: %(required)s"
msgstr ""
-#: heat/common/exception.py:137
+#: heat/common/exception.py:138
#, python-format
msgid ""
"Incorrect auth strategy, expected \"%(expected)s\" but received "
"\"%(received)s\""
msgstr ""
-#: heat/common/exception.py:142
+#: heat/common/exception.py:143
#, python-format
msgid "Connect error/bad request to Auth service at URL %(url)s."
msgstr ""
-#: heat/common/exception.py:146
+#: heat/common/exception.py:147
#, python-format
msgid "Auth service at URL %(url)s not found."
msgstr ""
-#: heat/common/exception.py:150
+#: heat/common/exception.py:151
msgid "Authorization failed."
msgstr ""
-#: heat/common/exception.py:154
+#: heat/common/exception.py:155
msgid "You are not authenticated."
msgstr ""
-#: heat/common/exception.py:158 heat/common/exception.py:163
+#: heat/common/exception.py:159 heat/common/exception.py:164
msgid "You are not authorized to complete this action."
msgstr ""
-#: heat/common/exception.py:167
+#: heat/common/exception.py:168
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr ""
-#: heat/common/exception.py:171
+#: heat/common/exception.py:172
#, python-format
msgid "Redirecting to %(uri)s for authorization."
msgstr ""
-#: heat/common/exception.py:175
+#: heat/common/exception.py:176
msgid "The URI was too long."
msgstr ""
-#: heat/common/exception.py:179
+#: heat/common/exception.py:180
#, python-format
msgid "Maximum redirects (%(redirects)s) was exceeded."
msgstr ""
-#: heat/common/exception.py:183
+#: heat/common/exception.py:184
msgid "Received invalid HTTP redirect."
msgstr ""
-#: heat/common/exception.py:187
+#: heat/common/exception.py:188
#, python-format
msgid ""
"Multiple 'image' service matches for region %(region)s. This generally "
"means that a region is required and you have not supplied one."
msgstr ""
-#: heat/common/exception.py:193
+#: heat/common/exception.py:194
#, python-format
msgid "The Parameter (%(key)s) was not provided."
msgstr ""
-#: heat/common/exception.py:197
+#: heat/common/exception.py:198
#, python-format
msgid "The Parameter (%(key)s) was not defined in template."
msgstr ""
-#: heat/common/exception.py:201
+#: heat/common/exception.py:202
#, python-format
msgid "The template version is invalid: %(explanation)s"
msgstr ""
-#: heat/common/exception.py:205
+#: heat/common/exception.py:206
#, python-format
msgid "The template section is invalid: %(section)s"
msgstr ""
-#: heat/common/exception.py:209
+#: heat/common/exception.py:210
#, python-format
msgid "The Parameter (%(key)s) has no attributes."
msgstr ""
-#: heat/common/exception.py:213
+#: heat/common/exception.py:214
#, python-format
msgid "The Referenced Attribute (%(resource)s %(key)s) is incorrect."
msgstr ""
-#: heat/common/exception.py:218
+#: heat/common/exception.py:219
#, python-format
msgid "The specified reference \"%(resource)s\" (in %(key)s) is incorrect."
msgstr ""
-#: heat/common/exception.py:223
+#: heat/common/exception.py:224
#, python-format
msgid "The Key (%(key_name)s) could not be found."
msgstr ""
-#: heat/common/exception.py:227
+#: heat/common/exception.py:228
#, python-format
msgid "The Flavor ID (%(flavor_id)s) could not be found."
msgstr ""
-#: heat/common/exception.py:231
+#: heat/common/exception.py:232
#, python-format
msgid "The %(entity)s (%(name)s) could not be found."
msgstr ""
-#: heat/common/exception.py:235
+#: heat/common/exception.py:236
#, python-format
msgid "The Nova network (%(network)s) could not be found."
msgstr ""
-#: heat/common/exception.py:239
+#: heat/common/exception.py:240
#, python-format
msgid "Multiple physical resources were found with name (%(name)s)."
msgstr ""
-#: heat/common/exception.py:244
+#: heat/common/exception.py:245
#, python-format
msgid "Searching Tenant %(target)s from Tenant %(actual)s forbidden."
msgstr ""
-#: heat/common/exception.py:249
+#: heat/common/exception.py:250
#, python-format
msgid "The Stack (%(stack_name)s) could not be found."
msgstr ""
-#: heat/common/exception.py:253
+#: heat/common/exception.py:254
#, python-format
msgid "The Stack (%(stack_name)s) already exists."
msgstr ""
-#: heat/common/exception.py:257
+#: heat/common/exception.py:258
#, python-format
msgid "%(error)s%(path)s%(message)s"
msgstr ""
-#: heat/common/exception.py:295 heat/common/exception.py:309
-#: heat/common/exception.py:317 heat/common/exception.py:321
+#: heat/common/exception.py:300 heat/common/exception.py:314
+#: heat/common/exception.py:322 heat/common/exception.py:326
#, python-format
msgid "%(message)s"
msgstr ""
-#: heat/common/exception.py:299
+#: heat/common/exception.py:304
#, python-format
msgid ""
"The Resource (%(resource_name)s) could not be found in Stack "
"%(stack_name)s."
msgstr ""
-#: heat/common/exception.py:304
+#: heat/common/exception.py:309
#, python-format
msgid "The Snapshot (%(snapshot)s) for Stack (%(stack)s) could not be found."
msgstr ""
-#: heat/common/exception.py:313
+#: heat/common/exception.py:318
#, python-format
msgid "The Resource Type (%(type_name)s) could not be found."
msgstr ""
-#: heat/common/exception.py:325
+#: heat/common/exception.py:330
#, python-format
msgid "The Resource (%(resource_name)s) is not available."
msgstr ""
-#: heat/common/exception.py:329
+#: heat/common/exception.py:334
#, python-format
msgid "The Resource (%(resource_id)s) could not be found."
msgstr ""
-#: heat/common/exception.py:333
+#: heat/common/exception.py:338
#, python-format
msgid "The Watch Rule (%(watch_name)s) could not be found."
msgstr ""
-#: heat/common/exception.py:337
-#, python-format
-msgid "%(exc_type)s: %(message)s"
-msgstr ""
-
-#: heat/common/exception.py:351
+#: heat/common/exception.py:401
#, python-format
msgid "%(feature)s is not supported."
msgstr ""
-#: heat/common/exception.py:355
+#: heat/common/exception.py:405
#, python-format
msgid "%(action)s is not supported for resource."
msgstr ""
-#: heat/common/exception.py:359
+#: heat/common/exception.py:409
#, python-format
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr ""
-#: heat/common/exception.py:369
+#: heat/common/exception.py:419
#, python-format
msgid "%(prop1)s cannot be specified without %(prop2)s."
msgstr ""
-#: heat/common/exception.py:373
+#: heat/common/exception.py:423
#, python-format
msgid ""
"%(prop1)s property should only be specified for %(prop2)s with value "
"%(value)s."
msgstr ""
-#: heat/common/exception.py:378
+#: heat/common/exception.py:428
#, python-format
msgid "At least one of the following properties must be specified: %(props)s"
msgstr ""
-#: heat/common/exception.py:398
+#: heat/common/exception.py:448
msgid ""
"Egress rules are only allowed when Neutron is used and the 'VpcId' "
"property is set."
msgstr ""
-#: heat/common/exception.py:410
+#: heat/common/exception.py:460
msgid "Not found"
msgstr ""
-#: heat/common/exception.py:416
+#: heat/common/exception.py:466
#, python-format
msgid "Invalid content type %(content_type)s"
msgstr ""
-#: heat/common/exception.py:420
+#: heat/common/exception.py:470
#, python-format
msgid "Request limit exceeded: %(message)s"
msgstr ""
-#: heat/common/exception.py:424
+#: heat/common/exception.py:474
msgid "Maximum resources per stack exceeded."
msgstr ""
-#: heat/common/exception.py:428
+#: heat/common/exception.py:478
#, python-format
msgid "Stack %(stack_name)s already has an action (%(action)s) in progress."
msgstr ""
-#: heat/common/exception.py:433
+#: heat/common/exception.py:483
#, python-format
msgid "Failed to stop stack (%(stack_name)s) on other engine (%(engine_id)s)"
msgstr ""
-#: heat/common/exception.py:438
+#: heat/common/exception.py:488
#, python-format
msgid ""
"Failed to send message to stack (%(stack_name)s) on other engine "
"(%(engine_id)s)"
msgstr ""
-#: heat/common/exception.py:443
+#: heat/common/exception.py:493
#, python-format
msgid "Service %(service_id)s not found"
msgstr ""
-#: heat/common/exception.py:447
+#: heat/common/exception.py:497
#, python-format
msgid "Unsupported object type %(objtype)s"
msgstr ""
-#: heat/common/exception.py:451
+#: heat/common/exception.py:501
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr ""
-#: heat/common/exception.py:455
+#: heat/common/exception.py:505
#, python-format
msgid "Version %(objver)s of %(objname)s is not supported"
msgstr ""
-#: heat/common/exception.py:459
+#: heat/common/exception.py:509
#, python-format
msgid "Object action %(action)s failed because: %(reason)s"
msgstr ""
-#: heat/common/exception.py:463
+#: heat/common/exception.py:513
#, python-format
msgid "Cannot modify readonly field %(field)s"
msgstr ""
-#: heat/common/exception.py:467
+#: heat/common/exception.py:517
#, python-format
msgid "Field %(field)s of %(objname)s is not an instance of Field"
msgstr ""
-#: heat/common/exception.py:471
+#: heat/common/exception.py:521
#, python-format
msgid ""
"Keystone has more than one service with same name %(service)s. Please use"
" service id instead of name"
msgstr ""
+#: heat/common/exception.py:526
+msgid "System SIGHUP signal received."
+msgstr ""
+
+#: heat/common/exception.py:530
+#, python-format
+msgid ""
+"Service %(service_name)s does not have required endpoint in service "
+"catalog for the resource %(resource_name)s"
+msgstr ""
+
#: heat/common/heat_keystoneclient.py:252
#, python-format
msgid "roles %s"
@@ -949,7 +955,7 @@ msgstr ""
msgid "Unknown attribute \"%s\""
msgstr ""
-#: heat/common/identifier.py:179 heat/engine/resource.py:160
+#: heat/common/identifier.py:179 heat/engine/resource.py:174
msgid "Resource name may not contain \"/\""
msgstr ""
@@ -1029,171 +1035,178 @@ msgstr ""
msgid "Failed to retrieve template: %s"
msgstr ""
-#: heat/common/wsgi.py:62 heat/common/wsgi.py:96 heat/common/wsgi.py:130
+#: heat/common/wsgi.py:64 heat/common/wsgi.py:102 heat/common/wsgi.py:140
msgid ""
"Address to bind the server. Useful when selecting a particular network "
"interface."
msgstr ""
-#: heat/common/wsgi.py:66 heat/common/wsgi.py:100 heat/common/wsgi.py:134
+#: heat/common/wsgi.py:68 heat/common/wsgi.py:106 heat/common/wsgi.py:144
msgid "The port on which the server will listen."
msgstr ""
-#: heat/common/wsgi.py:69 heat/common/wsgi.py:103 heat/common/wsgi.py:137
+#: heat/common/wsgi.py:71 heat/common/wsgi.py:109 heat/common/wsgi.py:147
msgid "Number of backlog requests to configure the socket with."
msgstr ""
-#: heat/common/wsgi.py:73 heat/common/wsgi.py:107 heat/common/wsgi.py:141
+#: heat/common/wsgi.py:75 heat/common/wsgi.py:113 heat/common/wsgi.py:151
msgid "Location of the SSL certificate file to use for SSL mode."
msgstr ""
-#: heat/common/wsgi.py:77 heat/common/wsgi.py:111 heat/common/wsgi.py:145
+#: heat/common/wsgi.py:79 heat/common/wsgi.py:117 heat/common/wsgi.py:155
msgid "Location of the SSL key file to use for enabling SSL mode."
msgstr ""
-#: heat/common/wsgi.py:81 heat/common/wsgi.py:115 heat/common/wsgi.py:149
+#: heat/common/wsgi.py:83 heat/common/wsgi.py:121 heat/common/wsgi.py:159
msgid "Number of workers for Heat service."
msgstr ""
-#: heat/common/wsgi.py:84 heat/common/wsgi.py:118
+#: heat/common/wsgi.py:86 heat/common/wsgi.py:124
msgid ""
"Maximum line size of message headers to be accepted. max_header_line may "
"need to be increased when using large tokens (typically those generated "
"by the Keystone v3 API with big service catalogs)."
msgstr ""
-#: heat/common/wsgi.py:152
+#: heat/common/wsgi.py:91 heat/common/wsgi.py:129 heat/common/wsgi.py:167
+msgid ""
+"The value for the socket option TCP_KEEPIDLE. This is the time in "
+"seconds that the connection must be idle before TCP starts sending "
+"keepalive probes."
+msgstr ""
+
+#: heat/common/wsgi.py:162
msgid ""
"Maximum line size of message headers to be accepted. max_header_line may "
"need to be increased when using large tokens (typically those generated "
"by the Keystone v3 API with big service catalogs.)"
msgstr ""
-#: heat/common/wsgi.py:165
+#: heat/common/wsgi.py:179
msgid "If False, closes the client socket connection explicitly."
msgstr ""
-#: heat/common/wsgi.py:168
+#: heat/common/wsgi.py:182
msgid ""
"Timeout for client connections' socket operations. If an incoming "
"connection is idle for this number of seconds it will be closed. A value "
"of '0' means wait forever."
msgstr ""
-#: heat/common/wsgi.py:179
+#: heat/common/wsgi.py:193
msgid ""
"Maximum raw byte size of JSON request body. Should be larger than "
"max_template_size."
msgstr ""
-#: heat/common/wsgi.py:222
+#: heat/common/wsgi.py:236
msgid ""
"When running server in SSL mode, you must specify both a cert_file and "
"key_file option value in your configuration file"
msgstr ""
-#: heat/common/wsgi.py:240
+#: heat/common/wsgi.py:252
#, python-format
msgid "Could not bind to %(bind_addr)safter trying for 30 seconds"
msgstr ""
-#: heat/common/wsgi.py:601
+#: heat/common/wsgi.py:784
#, python-format
msgid ""
"JSON body size (%(len)s bytes) exceeds maximum allowed size (%(limit)s "
"bytes)."
msgstr ""
-#: heat/common/wsgi.py:673
+#: heat/common/wsgi.py:856
msgid ""
"The server could not comply with the request since it is either malformed"
" or otherwise incorrect."
msgstr ""
-#: heat/db/sqlalchemy/api.py:96
+#: heat/db/sqlalchemy/api.py:95
#, python-format
msgid "raw template with id %s not found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:129
+#: heat/db/sqlalchemy/api.py:128
#, python-format
msgid "resource with id %s not found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:161
+#: heat/db/sqlalchemy/api.py:160
msgid "no resources were found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:187
+#: heat/db/sqlalchemy/api.py:189
msgid "no resource data found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:266
+#: heat/db/sqlalchemy/api.py:252
msgid "No resource data found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:322
+#: heat/db/sqlalchemy/api.py:308
#, python-format
msgid "no resources for stack_id %s were found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:514
+#: heat/db/sqlalchemy/api.py:500
#, python-format
msgid "Attempt to update a stack with id: %(id)s %(msg)s"
msgstr ""
-#: heat/db/sqlalchemy/api.py:533
+#: heat/db/sqlalchemy/api.py:519
#, python-format
msgid "Attempt to delete a stack with id: %(id)s %(msg)s"
msgstr ""
-#: heat/db/sqlalchemy/api.py:633
+#: heat/db/sqlalchemy/api.py:619
msgid "Length of OS_PASSWORD after encryption exceeds Heat limit (255 chars)"
msgstr ""
-#: heat/db/sqlalchemy/api.py:658
+#: heat/db/sqlalchemy/api.py:646
#, python-format
msgid "Attempt to delete user creds with id %(id)s that does not exist"
msgstr ""
-#: heat/db/sqlalchemy/api.py:812
+#: heat/db/sqlalchemy/api.py:800
#, python-format
msgid "Attempt to update a watch with id: %(id)s %(msg)s"
msgstr ""
-#: heat/db/sqlalchemy/api.py:823
+#: heat/db/sqlalchemy/api.py:811
#, python-format
msgid "Attempt to delete watch_rule: %(id)s %(msg)s"
msgstr ""
-#: heat/db/sqlalchemy/api.py:868
+#: heat/db/sqlalchemy/api.py:856
#, python-format
msgid "Software config with id %s not found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:895
+#: heat/db/sqlalchemy/api.py:892
#, python-format
msgid "Deployment with id %s not found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:941
+#: heat/db/sqlalchemy/api.py:938
#, python-format
msgid "Snapshot with id %s not found"
msgstr ""
-#: heat/db/sqlalchemy/api.py:1022
+#: heat/db/sqlalchemy/api.py:1019
msgid "age should be an integer"
msgstr ""
-#: heat/db/sqlalchemy/api.py:1024
+#: heat/db/sqlalchemy/api.py:1021
msgid "age should be a positive integer"
msgstr ""
-#: heat/db/sqlalchemy/api.py:1028
+#: heat/db/sqlalchemy/api.py:1025
msgid "granularity should be days, hours, minutes, or seconds"
msgstr ""
-#: heat/db/sqlalchemy/api.py:1115
+#: heat/db/sqlalchemy/api.py:1112
msgid "Cannot migrate to lower schema version."
msgstr ""
@@ -1217,22 +1230,16 @@ msgstr ""
msgid "Invalid tag, \"%s\" contains a comma"
msgstr ""
-#: heat/engine/attributes.py:174 heat/engine/attributes.py:180
-#: heat/engine/attributes.py:185 heat/engine/attributes.py:190
-#, python-format
-msgid "Attribute %(name)s is not of type %(att_type)s"
-msgstr ""
-
-#: heat/engine/attributes.py:196
+#: heat/engine/attributes.py:205
#, python-format
msgid "%(resource)s: Invalid attribute %(key)s"
msgstr ""
-#: heat/engine/attributes.py:241
+#: heat/engine/attributes.py:250
msgid "Can't traverse attribute path"
msgstr ""
-#: heat/engine/attributes.py:244
+#: heat/engine/attributes.py:253
msgid "Path components in attributes must be strings"
msgstr ""
@@ -1256,7 +1263,7 @@ msgstr ""
msgid "%(name)s constraint invalid for %(utype)s"
msgstr ""
-#: heat/engine/constraints.py:146 heat/engine/parameters.py:89
+#: heat/engine/constraints.py:146 heat/engine/parameters.py:92
#, python-format
msgid "Invalid default %(default)s (%(exc)s)"
msgstr ""
@@ -1377,17 +1384,17 @@ msgid ""
"types are: %(types)s"
msgstr ""
-#: heat/engine/environment.py:409 heat/tests/test_resource.py:131
+#: heat/engine/environment.py:411 heat/tests/test_resource.py:132
#, python-format
msgid "Resource \"%s\" has no type"
msgstr ""
-#: heat/engine/environment.py:412
+#: heat/engine/environment.py:414
#, python-format
msgid "Non-empty resource type is required for resource \"%s\""
msgstr ""
-#: heat/engine/environment.py:416
+#: heat/engine/environment.py:418
#, python-format
msgid "Resource \"%s\" type is not a string"
msgstr ""
@@ -1433,46 +1440,46 @@ msgstr ""
msgid "Default must be a comma-delimited list string: %s"
msgstr ""
-#: heat/engine/parameters.py:107
+#: heat/engine/parameters.py:110
#, python-format
msgid "Invalid %s, expected a mapping"
msgstr ""
-#: heat/engine/parameters.py:111
+#: heat/engine/parameters.py:114
#, python-format
msgid "Invalid key '%(key)s' for %(entity)s"
msgstr ""
-#: heat/engine/parameters.py:122
+#: heat/engine/parameters.py:125
#, python-format
msgid "Missing parameter type for parameter: %s"
msgstr ""
-#: heat/engine/parameters.py:199
+#: heat/engine/parameters.py:200
#, python-format
msgid "Invalid Parameter type \"%s\""
msgstr ""
-#: heat/engine/parameters.py:220
+#: heat/engine/parameters.py:221
#, python-format
msgid "Parameter '%(name)s' is invalid: %(exp)s"
msgstr ""
-#: heat/engine/parameters.py:363
+#: heat/engine/parameters.py:365
#, python-format
msgid "Value must be a comma-delimited list string: %s"
msgstr ""
-#: heat/engine/parameters.py:413
+#: heat/engine/parameters.py:415
#, python-format
msgid "Value must be valid JSON: %s"
msgstr ""
-#: heat/engine/parameters.py:555 heat/engine/hot/parameters.py:136
+#: heat/engine/parameters.py:557 heat/engine/hot/parameters.py:136
msgid "Stack ID"
msgstr ""
-#: heat/engine/parameters.py:559 heat/engine/hot/parameters.py:145
+#: heat/engine/parameters.py:561 heat/engine/hot/parameters.py:145
msgid "Stack Name"
msgstr ""
@@ -1491,151 +1498,151 @@ msgstr ""
msgid "%(schema)s supplied for %(type)s %(data)s"
msgstr ""
-#: heat/engine/properties.py:246
+#: heat/engine/properties.py:245
#, python-format
msgid "Value '%s' is not an integer"
msgstr ""
-#: heat/engine/properties.py:262
+#: heat/engine/properties.py:261
msgid "Value must be a string"
msgstr ""
-#: heat/engine/properties.py:291
+#: heat/engine/properties.py:290
#, python-format
msgid "\"%s\" is not a map"
msgstr ""
-#: heat/engine/properties.py:301
+#: heat/engine/properties.py:300
#, python-format
msgid "\"%s\" is not a list"
msgstr ""
-#: heat/engine/properties.py:314
+#: heat/engine/properties.py:313
#, python-format
msgid "\"%s\" is not a valid boolean"
msgstr ""
-#: heat/engine/properties.py:374
+#: heat/engine/properties.py:373
#, python-format
msgid "Unknown Property %s"
msgstr ""
-#: heat/engine/properties.py:381
+#: heat/engine/properties.py:380
#, python-format
msgid "Property %(prop)s: %(ua)s and %(im)s cannot both be True"
msgstr ""
-#: heat/engine/properties.py:406
+#: heat/engine/properties.py:405
#, python-format
msgid "Property %s not implemented yet"
msgstr ""
-#: heat/engine/properties.py:427
+#: heat/engine/properties.py:426
#, python-format
msgid "Invalid Property %s"
msgstr ""
-#: heat/engine/properties.py:453
+#: heat/engine/properties.py:452
#, python-format
msgid "Property %s not assigned"
msgstr ""
-#: heat/engine/resource.py:63
+#: heat/engine/resource.py:64
#, python-format
msgid "The Resource %s requires replacement."
msgstr ""
-#: heat/engine/resource.py:72
+#: heat/engine/resource.py:73
#, python-format
msgid "Went to status %(resource_status)s due to \"%(status_reason)s\""
msgstr ""
-#: heat/engine/resource.py:75 heat/engine/resource.py:85
-#: heat/engine/clients/os/nova.py:223 heat/engine/clients/os/nova.py:224
+#: heat/engine/resource.py:76 heat/engine/resource.py:86
+#: heat/engine/clients/os/nova.py:230 heat/engine/clients/os/nova.py:231
#: heat/engine/resources/openstack/trove/os_database.py:392
#: heat/engine/resources/openstack/trove/trove_cluster.py:187
msgid "Unknown"
msgstr ""
-#: heat/engine/resource.py:81
+#: heat/engine/resource.py:82
#, python-format
msgid ""
"%(result)s - Unknown status %(resource_status)s due to "
"\"%(status_reason)s\""
msgstr ""
-#: heat/engine/resource.py:84
+#: heat/engine/resource.py:85
msgid "Resource failed"
msgstr ""
-#: heat/engine/resource.py:92
+#: heat/engine/resource.py:93
#, python-format
msgid "The resource %s is already being updated."
msgstr ""
-#: heat/engine/resource.py:333
+#: heat/engine/resource.py:353
#, python-format
msgid "%(a)s paused until Hook %(h)s is cleared"
msgstr ""
-#: heat/engine/resource.py:447
+#: heat/engine/resource.py:467
#, python-format
msgid "Update to properties %(props)s of %(name)s (%(res)s)"
msgstr ""
-#: heat/engine/resource.py:660
+#: heat/engine/resource.py:702
#, python-format
msgid "State %s invalid for create"
msgstr ""
-#: heat/engine/resource.py:747
+#: heat/engine/resource.py:789
msgid "Resource ID was not provided."
msgstr ""
-#: heat/engine/resource.py:849
+#: heat/engine/resource.py:885
msgid "Resource update already requested"
msgstr ""
-#: heat/engine/resource.py:892
+#: heat/engine/resource.py:928
#, python-format
msgid "'%(attr)s': expected '%(expected)s', got '%(current)s'"
msgstr ""
-#: heat/engine/resource.py:910
+#: heat/engine/resource.py:946
#, python-format
msgid "State %s invalid for suspend"
msgstr ""
-#: heat/engine/resource.py:926
+#: heat/engine/resource.py:962
#, python-format
msgid "State %s invalid for resume"
msgstr ""
-#: heat/engine/resource.py:975
+#: heat/engine/resource.py:1011
msgid "limit cannot be less than 4"
msgstr ""
-#: heat/engine/resource.py:1001
+#: heat/engine/resource.py:1037
#, python-format
msgid "Invalid deletion policy \"%s\""
msgstr ""
-#: heat/engine/resource.py:1006
+#: heat/engine/resource.py:1042
#, python-format
msgid "\"%s\" deletion policy not supported"
msgstr ""
-#: heat/engine/resource.py:1205 heat/engine/stack.py:697
+#: heat/engine/resource.py:1267 heat/engine/stack.py:697
#, python-format
msgid "Invalid action %s"
msgstr ""
-#: heat/engine/resource.py:1208 heat/engine/stack.py:700
+#: heat/engine/resource.py:1270 heat/engine/stack.py:700
#, python-format
msgid "Invalid status %s"
msgstr ""
-#: heat/engine/resource.py:1284
+#: heat/engine/resource.py:1346
#, python-format
msgid "Cannot signal resource during %s"
msgstr ""
@@ -1645,52 +1652,52 @@ msgstr ""
msgid "%s Timed out"
msgstr ""
-#: heat/engine/service.py:572
+#: heat/engine/service.py:573
#, python-format
msgid ""
"You have reached the maximum stacks per tenant, %d. Please delete some "
"stacks."
msgstr ""
-#: heat/engine/service.py:641 heat/engine/service.py:716
+#: heat/engine/service.py:642 heat/engine/service.py:717
msgid "Convergence engine"
msgstr ""
-#: heat/engine/service.py:747
+#: heat/engine/service.py:748
msgid "Updating a stack when it is suspended"
msgstr ""
-#: heat/engine/service.py:751
+#: heat/engine/service.py:752
msgid "Updating a stack when it is deleting"
msgstr ""
-#: heat/engine/service.py:812
+#: heat/engine/service.py:814
#, python-format
msgid "Cancelling update when stack is %s"
msgstr ""
-#: heat/engine/service.py:851
+#: heat/engine/service.py:853
msgid "No Template provided."
msgstr ""
-#: heat/engine/service.py:1340
+#: heat/engine/service.py:1354
msgid "Deleting in-progress snapshot"
msgstr ""
-#: heat/engine/service_software_config.py:65
+#: heat/engine/service_software_config.py:75
msgid "server_id must be specified"
msgstr ""
-#: heat/engine/service_software_config.py:171
+#: heat/engine/service_software_config.py:206
msgid "deployment_id must be specified"
msgstr ""
-#: heat/engine/service_software_config.py:190
+#: heat/engine/service_software_config.py:225
#, python-format
msgid "Deployment exited with non-zero status code: %s"
msgstr ""
-#: heat/engine/service_software_config.py:217
+#: heat/engine/service_software_config.py:252
msgid "Outputs received"
msgstr ""
@@ -1724,60 +1731,60 @@ msgstr ""
msgid "Outputs must contain Output. Found a [%s] instead"
msgstr ""
-#: heat/engine/support.py:43
+#: heat/engine/support.py:42
#, python-format
msgid "previous_status must be SupportStatus instead of %s"
msgstr ""
-#: heat/engine/support.py:48
+#: heat/engine/support.py:47
#, python-format
msgid "Specified status is invalid, defaulting to %s"
msgstr ""
-#: heat/engine/sync_point.py:114
+#: heat/engine/sync_point.py:118
#, python-format
msgid "Sync Point %s not found"
msgstr ""
-#: heat/engine/template.py:44
+#: heat/engine/template.py:45
#, python-format
msgid "Ambiguous versions (%s)"
msgstr ""
-#: heat/engine/template.py:49
+#: heat/engine/template.py:50
msgid "Template version was not provided"
msgstr ""
-#: heat/engine/template.py:66
+#: heat/engine/template.py:67
#, python-format
msgid "Could not load %(name)s: %(error)s"
msgstr ""
-#: heat/engine/template.py:82
+#: heat/engine/template.py:83
#, python-format
msgid "\"%(version)s\". \"%(version_type)s\" should be one of: %(available)s"
msgstr ""
-#: heat/engine/template.py:85
+#: heat/engine/template.py:86
#, python-format
msgid "\"%(version)s\". \"%(version_type)s\" should be: %(available)s"
msgstr ""
-#: heat/engine/template.py:180
+#: heat/engine/template.py:183
#, python-format
msgid "\"%s\" is not a valid keyword inside a resource definition"
msgstr ""
-#: heat/engine/template.py:187
+#: heat/engine/template.py:190
#, python-format
msgid "Resource %(name)s %(key)s type must be %(typename)s"
msgstr ""
-#: heat/engine/template.py:239
+#: heat/engine/template.py:254
msgid "Each Resource must contain a Type key."
msgstr ""
-#: heat/engine/template.py:243
+#: heat/engine/template.py:258
#, python-format
msgid "Resources must contain Resource. Found a [%s] instead"
msgstr ""
@@ -1829,6 +1836,9 @@ msgstr ""
#: heat/engine/cfn/functions.py:304 heat/engine/cfn/functions.py:310
#: heat/engine/cfn/functions.py:358 heat/engine/cfn/functions.py:364
#: heat/engine/cfn/functions.py:417 heat/engine/cfn/functions.py:423
+#: heat/engine/hot/functions.py:264 heat/engine/hot/functions.py:271
+#: heat/engine/hot/functions.py:278 heat/engine/hot/functions.py:457
+#: heat/engine/hot/functions.py:467
#, python-format
msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
msgstr ""
@@ -1932,6 +1942,11 @@ msgstr ""
msgid "Resource %(name)s is missing \"%(type_key)s\""
msgstr ""
+#: heat/engine/clients/__init__.py:93
+#, python-format
+msgid "Invalid cloud_backend setting in heat.conf detected - %s"
+msgstr ""
+
#: heat/engine/clients/client_plugin.py:115
msgid "Unknown Keystone version"
msgstr ""
@@ -1949,41 +1964,41 @@ msgstr ""
msgid "Error retrieving image list from glance: %s"
msgstr ""
-#: heat/engine/clients/os/nova.py:222
+#: heat/engine/clients/os/nova.py:229
#, python-format
msgid "Message: %(message)s, Code: %(code)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:229
+#: heat/engine/clients/os/nova.py:236
#, python-format
msgid "%s is not active"
msgstr ""
-#: heat/engine/clients/os/nova.py:404
+#: heat/engine/clients/os/nova.py:411
#, python-format
msgid "Server %(name)s delete failed: (%(code)s) %(message)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:435
+#: heat/engine/clients/os/nova.py:445
#, python-format
msgid "Resizing to '%(flavor)s' failed, status '%(status)s'"
msgstr ""
-#: heat/engine/clients/os/nova.py:457
+#: heat/engine/clients/os/nova.py:467
#, python-format
msgid "Rebuilding server failed, status '%s'"
msgstr ""
-#: heat/engine/clients/os/nova.py:465
+#: heat/engine/clients/os/nova.py:475
msgid "nova server metadata needs to be a Map."
msgstr ""
-#: heat/engine/clients/os/nova.py:573
+#: heat/engine/clients/os/nova.py:583
#, python-format
msgid "Failed to attach volume %(vol)s to server %(srv)s - %(err)s"
msgstr ""
-#: heat/engine/clients/os/nova.py:590
+#: heat/engine/clients/os/nova.py:600
#, python-format
msgid "Could not detach attachment %(att)s from server %(srv)s."
msgstr ""
@@ -2050,7 +2065,7 @@ msgid ""
"(path), ...]"
msgstr ""
-#: heat/engine/hot/functions.py:178 heat/engine/hot/functions.py:300
+#: heat/engine/hot/functions.py:178 heat/engine/hot/functions.py:342
#, python-format
msgid "Arguments to \"%s\" must be a map"
msgstr ""
@@ -2072,41 +2087,51 @@ msgid ""
"%(file_key)s"
msgstr ""
-#: heat/engine/hot/functions.py:269
+#: heat/engine/hot/functions.py:311
#, python-format
msgid "The function %s is not supported in this version of HOT."
msgstr ""
-#: heat/engine/hot/functions.py:311
+#: heat/engine/hot/functions.py:353
#, python-format
msgid "\"repeat\" syntax should be %s"
msgstr ""
-#: heat/engine/hot/functions.py:315
+#: heat/engine/hot/functions.py:357
#, python-format
msgid "The \"for_each\" argument to \"%s\" must contain a map"
msgstr ""
-#: heat/engine/hot/functions.py:319
+#: heat/engine/hot/functions.py:361
#, python-format
msgid "The values of the \"for_each\" argument to \"%s\" must be lists"
msgstr ""
-#: heat/engine/hot/functions.py:363
+#: heat/engine/hot/functions.py:405
#, python-format
msgid "Argument to function \"%s\" must be a list of strings"
msgstr ""
-#: heat/engine/hot/functions.py:367
+#: heat/engine/hot/functions.py:409
#, python-format
msgid "Function \"%s\" usage: [\"<algorithm>\", \"<value>\"]"
msgstr ""
-#: heat/engine/hot/functions.py:371
+#: heat/engine/hot/functions.py:413
#, python-format
msgid "Algorithm must be one of %s"
msgstr ""
+#: heat/engine/hot/functions.py:476
+#, python-format
+msgid "Incorrect index to \"%(fn_name)s\" should be: %(example)s"
+msgstr ""
+
+#: heat/engine/hot/functions.py:482
+#, python-format
+msgid "Incorrect index to \"%(fn_name)s\" should be between 0 and %(max_index)s"
+msgstr ""
+
#: heat/engine/hot/parameters.py:70
#, python-format
msgid "Invalid parameter constraints for parameter %s, expected a list"
@@ -2137,34 +2162,34 @@ msgid ""
"[%(_type)s] instead"
msgstr ""
-#: heat/engine/resources/stack_resource.py:73
+#: heat/engine/resources/stack_resource.py:74
msgid "Failed to validate"
msgstr ""
-#: heat/engine/resources/stack_resource.py:132
+#: heat/engine/resources/stack_resource.py:133
msgid "Nested stack not found in DB"
msgstr ""
-#: heat/engine/resources/stack_resource.py:219
+#: heat/engine/resources/stack_resource.py:220
#, python-format
msgid "Recursion depth exceeds %d."
msgstr ""
-#: heat/engine/resources/stack_resource.py:379
+#: heat/engine/resources/stack_resource.py:380
msgid "Stack unknown status"
msgstr ""
-#: heat/engine/resources/stack_resource.py:467
+#: heat/engine/resources/stack_resource.py:471
#, python-format
msgid "Cannot suspend %s, stack not created"
msgstr ""
-#: heat/engine/resources/stack_resource.py:481
+#: heat/engine/resources/stack_resource.py:485
#, python-format
msgid "Cannot resume %s, stack not created"
msgstr ""
-#: heat/engine/resources/stack_resource.py:495
+#: heat/engine/resources/stack_resource.py:499
#, python-format
msgid "Cannot check %s, stack not created"
msgstr ""
@@ -2186,39 +2211,39 @@ msgstr ""
msgid "Only Templates with an extension of .yaml or .template are supported"
msgstr ""
-#: heat/engine/resources/template_resource.py:89
+#: heat/engine/resources/template_resource.py:90
#, python-format
msgid "Could not fetch remote template \"%(name)s\": %(exc)s"
msgstr ""
-#: heat/engine/resources/template_resource.py:198
+#: heat/engine/resources/template_resource.py:200
#, python-format
msgid "Unknown error retrieving %s"
msgstr ""
-#: heat/engine/resources/template_resource.py:207
+#: heat/engine/resources/template_resource.py:209
#, python-format
msgid "Required property %(n)s for facade %(type)s missing in provider"
msgstr ""
-#: heat/engine/resources/template_resource.py:215
+#: heat/engine/resources/template_resource.py:217
#, python-format
msgid ""
"Property %(n)s type mismatch between facade %(type)s (%(fs_type)s) and "
"provider (%(ps_type)s)"
msgstr ""
-#: heat/engine/resources/template_resource.py:224
+#: heat/engine/resources/template_resource.py:226
#, python-format
msgid "Provider requires property %(n)s unknown in facade %(type)s"
msgstr ""
-#: heat/engine/resources/template_resource.py:231
+#: heat/engine/resources/template_resource.py:233
#, python-format
msgid "Attribute %(attr)s for facade %(type)s missing in provider"
msgstr ""
-#: heat/engine/resources/template_resource.py:244
+#: heat/engine/resources/template_resource.py:246
#, python-format
msgid "Failed to retrieve template data: %s"
msgstr ""
@@ -2339,28 +2364,28 @@ msgstr ""
msgid "Start resizing the group %(group)s"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:352
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:353
#, python-format
msgid "End resizing the group %(group)s"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:376
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:378
msgid "MinSize can not be greater than MaxSize"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:380
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:382
msgid "The size of AutoScalingGroup can not be less than zero"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:386
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:388
msgid "DesiredCapacity must be between MinSize and MaxSize"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:395
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:397
msgid "Anything other than one VPCZoneIdentifier"
msgstr ""
-#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:405
+#: heat/engine/resources/aws/autoscaling/autoscaling_group.py:407
msgid "Either 'InstanceId' or 'LaunchConfigurationName' must be provided."
msgstr ""
@@ -2518,7 +2543,7 @@ msgid "The set of parameters passed to this nested stack."
msgstr ""
#: heat/engine/resources/aws/cfn/stack.py:61
-#: heat/engine/resources/aws/cfn/stack.py:105
+#: heat/engine/resources/aws/cfn/stack.py:102
#, python-format
msgid "Could not fetch remote template '%(url)s': %(exc)s"
msgstr ""
@@ -2674,7 +2699,7 @@ msgid "Instance is not ACTIVE (was: %s)"
msgstr ""
#: heat/engine/resources/aws/ec2/instance.py:786
-#: heat/engine/resources/openstack/nova/server.py:1362
+#: heat/engine/resources/openstack/nova/server.py:1396
#, python-format
msgid "Cannot suspend %s, resource_id not set"
msgstr ""
@@ -2691,7 +2716,7 @@ msgid "Suspend of instance %s failed"
msgstr ""
#: heat/engine/resources/aws/ec2/instance.py:825
-#: heat/engine/resources/openstack/nova/server.py:1401
+#: heat/engine/resources/openstack/nova/server.py:1435
#, python-format
msgid "Cannot resume %s, resource_id not set"
msgstr ""
@@ -2944,7 +2969,7 @@ msgstr ""
msgid "Owner of the source security group."
msgstr ""
-#: heat/engine/resources/aws/lb/loadbalancer.py:617
+#: heat/engine/resources/aws/lb/loadbalancer.py:614
msgid "Custom LoadBalancer template can not be found"
msgstr ""
@@ -3053,39 +3078,39 @@ msgstr ""
msgid "The passphrase the created key."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:128
+#: heat/engine/resources/openstack/barbican/order.py:129
msgid "The status of the order."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:129
+#: heat/engine/resources/openstack/barbican/order.py:133
msgid "The URI to the order."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:130
+#: heat/engine/resources/openstack/barbican/order.py:137
msgid "The URI to the created secret."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:132
+#: heat/engine/resources/openstack/barbican/order.py:141
msgid "The URI to the created container."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:135
+#: heat/engine/resources/openstack/barbican/order.py:146
msgid "The payload of the created public key, if available."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:138
+#: heat/engine/resources/openstack/barbican/order.py:151
msgid "The payload of the created private key, if available."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:141
+#: heat/engine/resources/openstack/barbican/order.py:156
msgid "The payload of the created certificate, if available."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:144
+#: heat/engine/resources/openstack/barbican/order.py:161
msgid "The payload of the created intermediates, if available."
msgstr ""
-#: heat/engine/resources/openstack/barbican/order.py:167
+#: heat/engine/resources/openstack/barbican/order.py:186
#, python-format
msgid "Order '%(name)s' failed: %(code)s - %(reason)s"
msgstr ""
@@ -3102,7 +3127,7 @@ msgstr ""
msgid "The status of the secret."
msgstr ""
-#: heat/engine/resources/openstack/barbican/secret.py:104
+#: heat/engine/resources/openstack/barbican/secret.py:105
msgid "The decrypted secret payload."
msgstr ""
@@ -3234,6 +3259,30 @@ msgstr ""
msgid "The query to filter the metrics"
msgstr ""
+#: heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py:42
+msgid ""
+"The class that provides encryption support. For example, "
+"nova.volume.encryptors.luks.LuksEncryptor."
+msgstr ""
+
+#: heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py:49
+msgid ""
+"Notional service where encryption is performed For example, front-end. "
+"For Nova."
+msgstr ""
+
+#: heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py:59
+msgid "The encryption algorithm or mode. For example, aes-xts-plain64."
+msgstr ""
+
+#: heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py:71
+msgid "Size of encryption key, in bits. For example, 128 or 256."
+msgstr ""
+
+#: heat/engine/resources/openstack/cinder/cinder_encrypted_vol_type.py:78
+msgid "Name or id of volume type (OS::Cinder::VolumeType)."
+msgstr ""
+
#: heat/engine/resources/openstack/cinder/cinder_volume_type.py:57
msgid "Name of the volume type."
msgstr ""
@@ -3289,8 +3338,8 @@ msgstr ""
#: heat/engine/resources/openstack/neutron/floatingip.py:48
#: heat/engine/resources/openstack/neutron/loadbalancer.py:230
#: heat/engine/resources/openstack/neutron/network_gateway.py:101
-#: heat/engine/resources/openstack/neutron/port.py:71
-#: heat/engine/resources/openstack/neutron/port.py:118
+#: heat/engine/resources/openstack/neutron/port.py:75
+#: heat/engine/resources/openstack/neutron/port.py:122
#: heat/engine/resources/openstack/neutron/router.py:97
#: heat/engine/resources/openstack/neutron/router.py:285
#: heat/engine/resources/openstack/neutron/router.py:295
@@ -3619,7 +3668,7 @@ msgid ""
"name of the resource."
msgstr ""
-#: heat/engine/resources/openstack/heat/instance_group.py:314
+#: heat/engine/resources/openstack/heat/instance_group.py:311
#, python-format
msgid "The current %s will result in stack update timeout."
msgstr ""
@@ -3852,7 +3901,7 @@ msgstr ""
msgid "A url to handle the alarm using native API."
msgstr ""
-#: heat/engine/resources/openstack/heat/scaling_policy.py:174
+#: heat/engine/resources/openstack/heat/scaling_policy.py:175
#, python-format
msgid "Alarm %(alarm)s could not find scaling group named \"%(group)s\""
msgstr ""
@@ -3949,32 +3998,32 @@ msgstr ""
msgid "The config value of the software config."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:119
+#: heat/engine/resources/openstack/heat/software_deployment.py:123
msgid ""
"ID of software configuration resource to execute when applying to the "
"server."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:125
+#: heat/engine/resources/openstack/heat/software_deployment.py:129
msgid "ID of Nova server to apply configuration to."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:132
+#: heat/engine/resources/openstack/heat/software_deployment.py:136
msgid "Input values to apply to the software configuration on this server."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:138
+#: heat/engine/resources/openstack/heat/software_deployment.py:142
msgid "Which stack actions will result in this deployment being triggered."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:146
+#: heat/engine/resources/openstack/heat/software_deployment.py:150
msgid ""
"Name of the derived config associated with this deployment. This is used "
"to apply a sort order to the list of configurations currently deployed to"
" a server."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:152
+#: heat/engine/resources/openstack/heat/software_deployment.py:156
msgid ""
"How the server should signal to heat with the deployment output values. "
"CFN_SIGNAL will allow an HTTP POST to a CFN keypair signed URL. "
@@ -3984,111 +4033,115 @@ msgid ""
"going to the COMPLETE state without waiting for any signal."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:169
+#: heat/engine/resources/openstack/heat/software_deployment.py:173
msgid "Captured stdout from the configuration execution."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:173
+#: heat/engine/resources/openstack/heat/software_deployment.py:177
msgid "Captured stderr from the configuration execution."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:177
+#: heat/engine/resources/openstack/heat/software_deployment.py:181
msgid "Returned status code from the configuration execution"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:210
+#: heat/engine/resources/openstack/heat/software_deployment.py:218
msgid "Not waiting for outputs signal"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:213
+#: heat/engine/resources/openstack/heat/software_deployment.py:221
msgid "Deploy data available"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:278
+#: heat/engine/resources/openstack/heat/software_deployment.py:286
#, python-format
msgid "Deployment to server failed: %s"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:366
+#: heat/engine/resources/openstack/heat/software_deployment.py:396
msgid "ID of the server being deployed to"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:371
+#: heat/engine/resources/openstack/heat/software_deployment.py:401
msgid "Name of the current action being deployed"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:376
+#: heat/engine/resources/openstack/heat/software_deployment.py:406
msgid "ID of the stack this deployment belongs to"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:381
+#: heat/engine/resources/openstack/heat/software_deployment.py:411
msgid "Name of this deployment resource in the stack"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:387
+#: heat/engine/resources/openstack/heat/software_deployment.py:417
msgid "How the server should signal to heat with the deployment output values."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:395
-#: heat/engine/resources/openstack/heat/software_deployment.py:410
+#: heat/engine/resources/openstack/heat/software_deployment.py:425
+#: heat/engine/resources/openstack/heat/software_deployment.py:440
msgid "ID of signal to use for signaling output values"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:402
-#: heat/engine/resources/openstack/heat/software_deployment.py:417
+#: heat/engine/resources/openstack/heat/software_deployment.py:432
+#: heat/engine/resources/openstack/heat/software_deployment.py:447
msgid "HTTP verb to use for signaling output values"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:425
+#: heat/engine/resources/openstack/heat/software_deployment.py:455
msgid "URL for API authentication"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:430
+#: heat/engine/resources/openstack/heat/software_deployment.py:460
msgid "Username for API authentication"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:435
+#: heat/engine/resources/openstack/heat/software_deployment.py:465
msgid "User ID for API authentication"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:440
+#: heat/engine/resources/openstack/heat/software_deployment.py:470
msgid "Password for API authentication"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:445
+#: heat/engine/resources/openstack/heat/software_deployment.py:475
msgid "ID of project for API authentication"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:578
+#: heat/engine/resources/openstack/heat/software_deployment.py:482
+msgid "ID of queue to use for signaling output values"
+msgstr ""
+
+#: heat/engine/resources/openstack/heat/software_deployment.py:618
#, python-format
msgid ""
"Resource %s's property user_data_format should be set to SOFTWARE_CONFIG "
"since there are software deployments on it."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:616
+#: heat/engine/resources/openstack/heat/software_deployment.py:656
msgid "A map of Nova names and IDs to apply configuration to."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:628
+#: heat/engine/resources/openstack/heat/software_deployment.py:668
msgid ""
"A map of Nova names and captured stdouts from the configuration execution"
" to each server."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:633
+#: heat/engine/resources/openstack/heat/software_deployment.py:673
msgid ""
"A map of Nova names and captured stderrs from the configuration execution"
" to each server."
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:638
+#: heat/engine/resources/openstack/heat/software_deployment.py:678
msgid ""
"A map of Nova names and returned status code from the configuration "
"execution"
msgstr ""
-#: heat/engine/resources/openstack/heat/software_deployment.py:682
+#: heat/engine/resources/openstack/heat/software_deployment.py:722
msgid ""
"This resource is deprecated and use is discouraged. Please use resource "
"OS::Heat:SoftwareDeploymentGroup instead."
@@ -4174,12 +4227,12 @@ msgid "Token for stack-user which can be used for signalling handle"
msgstr ""
#: heat/engine/resources/openstack/keystone/endpoint.py:26
-#: heat/engine/resources/openstack/keystone/group.py:27
+#: heat/engine/resources/openstack/keystone/group.py:26
#: heat/engine/resources/openstack/keystone/project.py:26
#: heat/engine/resources/openstack/keystone/role.py:25
#: heat/engine/resources/openstack/keystone/role_assignments.py:55
#: heat/engine/resources/openstack/keystone/service.py:25
-#: heat/engine/resources/openstack/keystone/user.py:27
+#: heat/engine/resources/openstack/keystone/user.py:26
msgid "Supported versions: keystone v3"
msgstr ""
@@ -4203,16 +4256,16 @@ msgstr ""
msgid "URL of keystone service endpoint."
msgstr ""
-#: heat/engine/resources/openstack/keystone/group.py:38
+#: heat/engine/resources/openstack/keystone/group.py:37
msgid "Name of keystone group."
msgstr ""
-#: heat/engine/resources/openstack/keystone/group.py:43
+#: heat/engine/resources/openstack/keystone/group.py:42
#: heat/engine/resources/openstack/keystone/project.py:44
msgid "Name or id of keystone domain."
msgstr ""
-#: heat/engine/resources/openstack/keystone/group.py:50
+#: heat/engine/resources/openstack/keystone/group.py:49
msgid "Description of keystone group."
msgstr ""
@@ -4269,42 +4322,86 @@ msgstr ""
msgid "Type of keystone Service."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:40
+#: heat/engine/resources/openstack/keystone/user.py:39
msgid "Name of keystone user."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:45
+#: heat/engine/resources/openstack/keystone/user.py:44
msgid "Name of keystone domain."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:52
+#: heat/engine/resources/openstack/keystone/user.py:51
msgid "Description of keystone user."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:58
+#: heat/engine/resources/openstack/keystone/user.py:57
msgid "Keystone user is enabled or disabled"
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:64
+#: heat/engine/resources/openstack/keystone/user.py:63
msgid "Email address of keystone user."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:69
+#: heat/engine/resources/openstack/keystone/user.py:68
msgid "Password of keystone user."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:74
+#: heat/engine/resources/openstack/keystone/user.py:73
msgid "Default project of keystone user."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:80
+#: heat/engine/resources/openstack/keystone/user.py:79
msgid "keystone user groups."
msgstr ""
-#: heat/engine/resources/openstack/keystone/user.py:84
+#: heat/engine/resources/openstack/keystone/user.py:83
msgid "keystone user group."
msgstr ""
+#: heat/engine/resources/openstack/magnum/baymodel.py:42
+msgid "The bay model name."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:46
+msgid "The image name or UUID to use as a base image for this baymodel."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:55
+msgid "The flavor of this bay model."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:62
+msgid "The flavor of the master node for this bay model."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:69
+msgid "The name or id of the nova ssh keypair."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:77
+msgid "The external network to attach the Bay."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:85
+msgid "The fixed network to attach the Bay."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:92
+msgid "The DNS nameserver address."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:100
+msgid "The size in GB of the docker volume."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:107
+msgid "The SSH Authorized Key."
+msgstr ""
+
+#: heat/engine/resources/openstack/magnum/baymodel.py:111
+msgid "The Container Orchestration Engine for this bay model."
+msgstr ""
+
#: heat/engine/resources/openstack/manila/security_service.py:41
msgid "Security service name."
msgstr ""
@@ -4337,6 +4434,156 @@ msgstr ""
msgid "Security service description."
msgstr ""
+#: heat/engine/resources/openstack/manila/share.py:76
+msgid "Share protocol supported by shared filesystem."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:83
+msgid "Share storage size in GB."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:88
+msgid ""
+"Name or ID of shared file system snapshot that will be restored and "
+"created as a new share."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:94
+msgid "Share name."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:99
+msgid "Metadata key-values defined for share."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:104
+msgid "Name or ID of shared network defined for shared filesystem."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:109
+msgid "Share description."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:114
+msgid ""
+"Name or ID of shared filesystem type. Types defines some share filesystem"
+" profiles that will be used for share creation."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:120
+msgid "Defines if shared filesystem is public or private."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:126
+msgid "A list of access rules that define access from IP to Share."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:132
+msgid ""
+"IP or other address information about guest that allowed to access to "
+"Share."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:138
+msgid "Type of access that should be provided to guest."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:145
+msgid "Level of access that need to be provided for guest."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:158
+msgid "The availability zone of shared filesystem."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:162
+msgid "Share host."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:166
+msgid "Export locations of share."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:170
+msgid ""
+"ID of server (VM, etc...) on host that is used for exporting network "
+"file-system."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:175
+msgid "Datetime when a share was created."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:179
+msgid "Current share status."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:183
+msgid "Share project ID."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:245
+msgid ""
+"Error during applying access rules to share \"{0}\". The root cause of "
+"the problem is the following: {1}."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:251
+msgid "Error during creation of share \"{0}\""
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:256
+msgid "Unknown share_status during creation of share \"{0}\""
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:286
+msgid "Error during deleting share \"{0}\"."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share.py:291
+msgid "Unknown status during deleting share \"{0}\""
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:48
+msgid "Name of the share network."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:53
+msgid "Neutron network id."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:59
+msgid "Neutron subnet id."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:65
+msgid "Nova network id."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:71
+msgid "Share network description."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:76
+msgid "A list of security services IDs or names."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:87
+msgid "VLAN ID for VLAN networks or tunnel-id for GRE/VXLAN networks."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:92
+msgid "CIDR of subnet."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:96
+msgid "Version of IP address."
+msgstr ""
+
+#: heat/engine/resources/openstack/manila/share_network.py:100
+msgid "The physical mechanism by which the virtual network is implemented."
+msgstr ""
+
#: heat/engine/resources/openstack/manila/share_type.py:45
msgid "Name of the share type."
msgstr ""
@@ -4615,9 +4862,9 @@ msgstr ""
#: heat/engine/resources/openstack/neutron/floatingip.py:125
#: heat/engine/resources/openstack/neutron/loadbalancer.py:149
#: heat/engine/resources/openstack/neutron/loadbalancer.py:638
-#: heat/engine/resources/openstack/neutron/net.py:102
+#: heat/engine/resources/openstack/neutron/net.py:113
#: heat/engine/resources/openstack/neutron/network_gateway.py:145
-#: heat/engine/resources/openstack/neutron/port.py:269
+#: heat/engine/resources/openstack/neutron/port.py:281
#: heat/engine/resources/openstack/neutron/provider_net.py:87
#: heat/engine/resources/openstack/neutron/router.py:151
#: heat/engine/resources/openstack/neutron/subnet.py:242
@@ -4979,7 +5226,7 @@ msgid "Provider implementing this load balancer instance."
msgstr ""
#: heat/engine/resources/openstack/neutron/loadbalancer.py:407
-#: heat/tests/neutron/test_neutron_loadbalancer.py:601
+#: heat/tests/neutron/test_neutron_loadbalancer.py:606
msgid ""
"Property cookie_name is required, when session_persistence type is set to"
" APP_COOKIE."
@@ -5098,66 +5345,76 @@ msgstr ""
msgid "CIDR to be associated with this metering rule."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:38
+#: heat/engine/resources/openstack/neutron/net.py:41
msgid ""
"A string specifying a symbolic name for the network, which is not "
"required to be unique."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:44
+#: heat/engine/resources/openstack/neutron/net.py:47
msgid ""
"Extra parameters to include in the \"network\" object in the creation "
"request. Parameters are often specific to installed hardware or "
"extensions."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:52
+#: heat/engine/resources/openstack/neutron/net.py:55
msgid "A boolean value specifying the administrative status of the network."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:59
+#: heat/engine/resources/openstack/neutron/net.py:62
msgid ""
"The ID of the tenant which will own the network. Only administrative "
"users can set the tenant identifier; this cannot be changed using "
"authorization policies."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:65
+#: heat/engine/resources/openstack/neutron/net.py:68
msgid ""
"Whether this network should be shared across all tenants. Note that the "
"default policy setting restricts usage of this attribute to "
"administrative users only."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:73
+#: heat/engine/resources/openstack/neutron/net.py:76
msgid ""
"The IDs of the DHCP agent to schedule the network. Note that the default "
"policy setting in Neutron restricts usage of this property to "
"administrative users only."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:82
+#: heat/engine/resources/openstack/neutron/net.py:83
+msgid ""
+"Flag to enable/disable port security on the network. It provides the "
+"default value for the attribute of the ports created on this network"
+msgstr ""
+
+#: heat/engine/resources/openstack/neutron/net.py:93
#: heat/engine/resources/openstack/neutron/provider_net.py:79
msgid "The status of the network."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:86
+#: heat/engine/resources/openstack/neutron/net.py:97
msgid "The name of the network."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:90
+#: heat/engine/resources/openstack/neutron/net.py:101
#: heat/engine/resources/openstack/neutron/provider_net.py:83
msgid "Subnets of this network."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:94
+#: heat/engine/resources/openstack/neutron/net.py:105
msgid "The administrative status of the network."
msgstr ""
-#: heat/engine/resources/openstack/neutron/net.py:98
+#: heat/engine/resources/openstack/neutron/net.py:109
msgid "The tenant owning this network."
msgstr ""
+#: heat/engine/resources/openstack/neutron/net.py:117
+msgid "Port security enabled of the network."
+msgstr ""
+
#: heat/engine/resources/openstack/neutron/network_gateway.py:62
msgid "The name of the network gateway."
msgstr ""
@@ -5208,71 +5465,71 @@ msgstr ""
msgid "Resource is not built"
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:80
+#: heat/engine/resources/openstack/neutron/port.py:84
#, python-format
msgid ""
"Network this port belongs to. If you plan to use current port to assign "
"Floating IP, you should specify %(fixed_ips)s with %(subnet)s"
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:92
+#: heat/engine/resources/openstack/neutron/port.py:96
msgid "A symbolic name for this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:97
+#: heat/engine/resources/openstack/neutron/port.py:101
msgid ""
"Extra parameters to include in the \"port\" object in the creation "
"request."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:103
-#: heat/engine/resources/openstack/neutron/port.py:224
+#: heat/engine/resources/openstack/neutron/port.py:107
+#: heat/engine/resources/openstack/neutron/port.py:236
msgid "The administrative state of this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:109
+#: heat/engine/resources/openstack/neutron/port.py:113
msgid "Desired IPs for this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:126
+#: heat/engine/resources/openstack/neutron/port.py:130
msgid "Subnet in which to allocate the IP address for this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:135
+#: heat/engine/resources/openstack/neutron/port.py:139
msgid "IP address desired in the subnet for this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:146
+#: heat/engine/resources/openstack/neutron/port.py:150
msgid "MAC address to give to this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:153
+#: heat/engine/resources/openstack/neutron/port.py:157
msgid "Device ID of this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:158
+#: heat/engine/resources/openstack/neutron/port.py:162
msgid "Security group IDs to associate with this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:163
+#: heat/engine/resources/openstack/neutron/port.py:167
msgid "Additional MAC/IP address pairs allowed to pass through the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:170
+#: heat/engine/resources/openstack/neutron/port.py:174
msgid "MAC address to allow through this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:177
+#: heat/engine/resources/openstack/neutron/port.py:181
msgid "IP address to allow through this port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:188
+#: heat/engine/resources/openstack/neutron/port.py:192
msgid ""
"Name of the network owning the port. The value is typically "
"network:floatingip or network:router_interface or network:dhcp"
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:195
+#: heat/engine/resources/openstack/neutron/port.py:199
msgid ""
"Policy on how to respond to a stack-update for this resource. "
"REPLACE_ALWAYS will replace the port regardless of any property changes. "
@@ -5280,7 +5537,7 @@ msgid ""
"property."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:207
+#: heat/engine/resources/openstack/neutron/port.py:211
msgid ""
"The vnic type to be bound on the neutron port. To support SR-IOV PCI "
"passthrough networking, you can request that the neutron port to be "
@@ -5289,50 +5546,61 @@ msgid ""
"only works for Neutron deployments that support the bindings extension."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:228
+#: heat/engine/resources/openstack/neutron/port.py:226
+msgid ""
+"Flag to enable/disable port security on the port. When disable this "
+"feature(set it to False), there will be no packages filtering, like "
+"security-group and address-pairs."
+msgstr ""
+
+#: heat/engine/resources/openstack/neutron/port.py:240
msgid "Unique identifier for the device."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:232
+#: heat/engine/resources/openstack/neutron/port.py:244
msgid "Name of the network owning the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:236
+#: heat/engine/resources/openstack/neutron/port.py:248
msgid "Fixed IP addresses."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:240
+#: heat/engine/resources/openstack/neutron/port.py:252
msgid "MAC address of the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:244
+#: heat/engine/resources/openstack/neutron/port.py:256
msgid "Friendly name of the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:248
+#: heat/engine/resources/openstack/neutron/port.py:260
msgid "Unique identifier for the network owning the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:252
+#: heat/engine/resources/openstack/neutron/port.py:264
msgid "A list of security groups for the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:256
+#: heat/engine/resources/openstack/neutron/port.py:268
msgid "The status of the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:260
+#: heat/engine/resources/openstack/neutron/port.py:272
msgid "Tenant owning the port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:264
+#: heat/engine/resources/openstack/neutron/port.py:276
msgid "Additional MAC/IP address pairs allowed to pass through a port."
msgstr ""
-#: heat/engine/resources/openstack/neutron/port.py:273
+#: heat/engine/resources/openstack/neutron/port.py:285
msgid "A list of all subnet attributes for the port."
msgstr ""
+#: heat/engine/resources/openstack/neutron/port.py:289
+msgid "Port security enabled of the port."
+msgstr ""
+
#: heat/engine/resources/openstack/neutron/provider_net.py:47
msgid "A string specifying the provider network type for the network."
msgstr ""
@@ -6319,28 +6587,28 @@ msgid ""
"are novnc, xvpvnc, spice-html5, rdp-html5, serial."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1191
+#: heat/engine/resources/openstack/nova/server.py:1211
#, python-format
msgid "Either volume_id or snapshot_id must be specified for device mapping %s"
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1216
+#: heat/engine/resources/openstack/nova/server.py:1236
msgid "Either volume_id, snapshot_id, image_id or swap_size must be specified."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1236
+#: heat/engine/resources/openstack/nova/server.py:1256
#, python-format
msgid "Neither image nor bootable volume is specified for instance %s"
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1251
+#: heat/engine/resources/openstack/nova/server.py:1271
#, python-format
msgid ""
"One of the properties \"%(id)s\", \"%(port_id)s\", \"%(uuid)s\" should be"
" set for the specified network of server \"%(server)s\"."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1261
+#: heat/engine/resources/openstack/nova/server.py:1281
#, python-format
msgid ""
"Properties \"%(uuid)s\" and \"%(id)s\" are both set to the network "
@@ -6348,32 +6616,32 @@ msgid ""
"is deprecated. Use only \"%(id)s\" property."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1297
+#: heat/engine/resources/openstack/nova/server.py:1317
#, python-format
msgid ""
"Instance metadata must not contain greater than %s entries. This is the "
"maximum number allowed by your service provider"
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1306
+#: heat/engine/resources/openstack/nova/server.py:1326
#, python-format
msgid "The personality property may not contain greater than %s entries."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1312
+#: heat/engine/resources/openstack/nova/server.py:1332
#, python-format
msgid ""
"The contents of personality file \"%(path)s\" is larger than the maximum "
"allowed personality file size (%(max_size)s bytes)."
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1369
-#: heat/engine/resources/openstack/nova/server.py:1408
+#: heat/engine/resources/openstack/nova/server.py:1403
+#: heat/engine/resources/openstack/nova/server.py:1442
#, python-format
msgid "Failed to find server %s"
msgstr ""
-#: heat/engine/resources/openstack/nova/server.py:1390
+#: heat/engine/resources/openstack/nova/server.py:1424
#, python-format
msgid "Suspend of server %s failed"
msgstr ""
@@ -6759,7 +7027,7 @@ msgid "A list of instances ids."
msgstr ""
#: heat/engine/resources/openstack/trove/trove_cluster.py:127
-msgid "IP of the cluster."
+msgid "A list of cluster instance IPs."
msgstr ""
#: heat/engine/resources/openstack/zaqar/queue.py:43
@@ -6820,7 +7088,7 @@ msgstr ""
msgid "Unsupported resource '%s' in LoadBalancerNames"
msgstr ""
-#: heat/tests/test_exception.py:27
+#: heat/tests/test_exception.py:28
#, python-format
msgid "Testing message %(text)s"
msgstr ""
@@ -6839,58 +7107,76 @@ msgstr ""
msgid "Failed to retrieve template"
msgstr ""
-#: heat/tests/test_remote_stack.py:635
+#: heat/tests/test_remote_stack.py:642
msgid ""
-"ResourceInError: Went to status UPDATE_FAILED due to \"Remote stack "
-"update failed\""
+"ResourceInError: resources.remote_stack: Went to status UPDATE_FAILED due"
+" to \"Remote stack update failed\""
msgstr ""
-#: heat/tests/test_server.py:1085
-#, python-format
-msgid ""
-"Properties \"uuid\" and \"network\" are both set to the network "
-"\"%(network)s\" for the server \"%(server)s\". The \"uuid\" property is "
-"deprecated. Use only \"network\" property."
+#: heat/tests/test_software_deployment.py:832
+msgid "need more memory."
msgstr ""
-#: heat/tests/test_server.py:1131
-#, python-format
+#: heat/tests/test_validate.py:1462
msgid ""
-"One of the properties \"network\", \"port\", \"uuid\" should be set for "
-"the specified network of server \"%s\"."
+"Parameter Groups error: parameter_groups.Database Group: The InstanceType"
+" parameter must be assigned to one parameter group only."
msgstr ""
-#: heat/tests/test_software_deployment.py:815
-msgid "need more memory."
+#: heat/tests/test_validate.py:1475
+msgid ""
+"Parameter Groups error: parameter_groups.: The key_name parameter must be"
+" assigned to one parameter group only."
msgstr ""
-#: heat/tests/test_validate.py:1410
+#: heat/tests/test_validate.py:1493
msgid ""
-"Parameter Groups error : parameter_groups.Database Group: The "
-"InstanceType parameter must be assigned to one parameter group only."
+"Parameter Groups error: parameter_groups.Database Group: The grouped "
+"parameter SomethingNotHere does not reference a valid parameter."
msgstr ""
-#: heat/tests/test_validate.py:1428
+#: heat/tests/test_validate.py:1507
msgid ""
-"Parameter Groups error : parameter_groups.Database Group: The grouped "
-"parameter SomethingNotHere does not reference a valid parameter."
+"Parameter Groups error: parameter_groups.: The grouped parameter key_name"
+" does not reference a valid parameter."
msgstr ""
-#: heat/tests/test_validate.py:1441
+#: heat/tests/test_validate.py:1520
msgid ""
-"Parameter Groups error : parameter_groups.Server Group: The parameters "
+"Parameter Groups error: parameter_groups.Server Group: The parameters "
"must be provided for each parameter group."
msgstr ""
-#: heat/tests/test_validate.py:1452
+#: heat/tests/test_validate.py:1531
+msgid ""
+"Parameter Groups error: parameter_groups: The parameter_groups should be "
+"a list."
+msgstr ""
+
+#: heat/tests/test_validate.py:1542
msgid ""
-"Parameter Groups error : parameter_groups: The parameter_groups should be"
-" a list."
+"Parameter Groups error: parameter_groups.Server Group: The parameters of "
+"parameter group should be a list."
msgstr ""
-#: heat/tests/test_validate.py:1463
+#: heat/tests/test_validate.py:1554
msgid ""
-"Parameter Groups error : parameter_groups.Server Group: The parameters of"
-" parameter group should be a list."
+"Parameter Groups error: parameter_groups.: The parameters of parameter "
+"group should be a list."
+msgstr ""
+
+#: heat/tests/nova/test_server.py:1155
+#, python-format
+msgid ""
+"Properties \"uuid\" and \"network\" are both set to the network "
+"\"%(network)s\" for the server \"%(server)s\". The \"uuid\" property is "
+"deprecated. Use only \"network\" property."
+msgstr ""
+
+#: heat/tests/nova/test_server.py:1201
+#, python-format
+msgid ""
+"One of the properties \"network\", \"port\", \"uuid\" should be set for "
+"the specified network of server \"%s\"."
msgstr ""
diff --git a/heat/locale/ko_KR/LC_MESSAGES/heat-log-error.po b/heat/locale/ko_KR/LC_MESSAGES/heat-log-error.po
index 23acbe51e..23261233f 100644
--- a/heat/locale/ko_KR/LC_MESSAGES/heat-log-error.po
+++ b/heat/locale/ko_KR/LC_MESSAGES/heat-log-error.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-30 06:05+0000\n"
-"PO-Revision-Date: 2015-05-29 10:46+0000\n"
+"POT-Creation-Date: 2015-07-02 06:02+0000\n"
+"PO-Revision-Date: 2015-07-01 07:19+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/heat/"
"language/ko_KR/)\n"
@@ -20,10 +20,6 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=1; plural=0;\n"
-#, python-format
-msgid "%(opname) %(ci)s failed for %(a)s on %(sid)s"
-msgstr "%(sid)s의 %(a)s를 위한 %(opname) %(ci)s 실패 "
-
msgid ""
"Could not allocate floating IP. Probably there is no default floating IP "
"pool is configured."
@@ -133,9 +129,6 @@ msgstr "예기치 않은 작업 %s를 갱신하려 했습니다."
msgid "Unexpected number of keys in watch_data.data!"
msgstr "watch_data.data의 키의 예기치 않은 수!"
-msgid "Unhandled exception"
-msgstr "처리되지 않은 예외"
-
msgid "failed to get lifecycle plug point classes"
msgstr "라이프 사이클 플러그 포인트 클래스를 가져 오지 못했습니다"
@@ -146,11 +139,5 @@ msgstr "스택 수명주기 %s 클래스의 인스턴스를 실패"
msgid "failed to sort lifecycle plug point classes"
msgstr "라이프 사이클 플러그 포인트 클래스를 정렬하는 데 실패"
-msgid "in dynamic looping call"
-msgstr "동적 루프 호출에서"
-
-msgid "in fixed duration looping call"
-msgstr "고정 기간 루프 호출에서"
-
msgid "trust token re-scoping failed!"
msgstr "신뢰할 수 있는 token 재설정 실패!"
diff --git a/heat/locale/pt_BR/LC_MESSAGES/heat-log-error.po b/heat/locale/pt_BR/LC_MESSAGES/heat-log-error.po
index 1b784e51a..2854e4f9e 100644
--- a/heat/locale/pt_BR/LC_MESSAGES/heat-log-error.po
+++ b/heat/locale/pt_BR/LC_MESSAGES/heat-log-error.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Heat\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-30 06:05+0000\n"
-"PO-Revision-Date: 2015-05-29 10:46+0000\n"
+"POT-Creation-Date: 2015-07-08 06:02+0000\n"
+"PO-Revision-Date: 2015-07-07 07:51+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/heat/"
"language/pt_BR/)\n"
@@ -37,8 +37,10 @@ msgid "Failed to import module %s"
msgstr "Falha ao importar módulo %s"
#, python-format
-msgid "Removing dead child %s"
-msgstr "Removendo filho inativo %s"
+msgid "Not respawning child %d, cannot recover from termination"
+msgstr ""
+"Não está reproduzindo o filho %d, não é possível recuperar a partir da "
+"rescisão"
#, python-format
msgid "Request does not contain %s parameter!"
@@ -56,12 +58,3 @@ msgstr "SIGHUP recebido"
msgid "SIGTERM received"
msgstr "SIGTERM recebido"
-
-msgid "Unhandled exception"
-msgstr "Exceção não tratada"
-
-msgid "in dynamic looping call"
-msgstr "em chamada de laço dinâmico"
-
-msgid "in fixed duration looping call"
-msgstr "em uma chamada de laço de duração fixa"
diff --git a/heat/objects/raw_template.py b/heat/objects/raw_template.py
index 1f7bacff8..b1d5bd654 100644
--- a/heat/objects/raw_template.py
+++ b/heat/objects/raw_template.py
@@ -20,7 +20,6 @@ RawTemplate object
import copy
from oslo_config import cfg
-from oslo_utils import encodeutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
@@ -56,10 +55,9 @@ class RawTemplate(
env_fmt.ENCRYPTED_PARAM_NAMES]
for param_name in encrypted_param_names:
- decrypt_function_name = parameters[param_name][0]
- decrypt_function = getattr(crypt, decrypt_function_name)
- decrypted_val = decrypt_function(parameters[param_name][1])
- parameters[param_name] = encodeutils.safe_decode(decrypted_val)
+ method, value = parameters[param_name]
+ decrypted_val = crypt.decrypt(method, value)
+ parameters[param_name] = decrypted_val
tpl.environment[env_fmt.PARAMETERS] = parameters
tpl._context = context
@@ -78,8 +76,7 @@ class RawTemplate(
if not tmpl.param_schemata()[param_name].hidden:
continue
clear_text_val = tmpl.env.params.get(param_name)
- encoded_val = encodeutils.safe_encode(clear_text_val)
- tmpl.env.params[param_name] = crypt.encrypt(encoded_val)
+ tmpl.env.params[param_name] = crypt.encrypt(clear_text_val)
tmpl.env.encrypted_param_names.append(param_name)
@classmethod
diff --git a/heat/objects/resource.py b/heat/objects/resource.py
index d8dbca967..821ba4b8e 100755
--- a/heat/objects/resource.py
+++ b/heat/objects/resource.py
@@ -20,7 +20,6 @@ Resource object
from oslo_config import cfg
from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import six
@@ -85,9 +84,8 @@ class Resource(
if resource.properties_data_encrypted and resource.properties_data:
properties_data = {}
for prop_name, prop_value in resource.properties_data.items():
- decrypt_function_name = prop_value[0]
- decrypt_function = getattr(crypt, decrypt_function_name, None)
- decrypted_value = decrypt_function(prop_value[1])
+ method, value = prop_value
+ decrypted_value = crypt.decrypt(method, value)
prop_string = jsonutils.loads(decrypted_value)
properties_data[prop_name] = prop_string
resource.properties_data = properties_data
@@ -185,8 +183,7 @@ class Resource(
result = {}
for prop_name, prop_value in data.items():
prop_string = jsonutils.dumps(prop_value)
- encoded_value = encodeutils.safe_encode(prop_string)
- encrypted_value = crypt.encrypt(encoded_value)
+ encrypted_value = crypt.encrypt(prop_string)
result[prop_name] = encrypted_value
return (True, result)
return (False, data)
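
The two hunks above (raw_template.py and resource.py) drop the old pattern of looking up a decrypt function by its stored name and calling safe_encode/safe_decode around it, and instead rely on a symmetric crypt.encrypt()/crypt.decrypt() pair in heat.common.crypt. A minimal round-trip sketch of what the new callers imply -- this assumes, as the unpacking above suggests, that encrypt() returns a (method, ciphertext) pair, and it would need Heat's auth_encryption_key configured to actually run:

    # Sketch only; signatures inferred from the callers in this patch.
    from heat.common import crypt

    stored = crypt.encrypt('super-secret-parameter')   # -> (method, ciphertext)
    method, ciphertext = stored
    # decrypt() now returns the clear text directly, no safe_decode needed.
    assert crypt.decrypt(method, ciphertext) == 'super-secret-parameter'
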
diff --git a/heat/rpc/api.py b/heat/rpc/api.py
index 283fa74e7..9ba60c888 100644
--- a/heat/rpc/api.py
+++ b/heat/rpc/api.py
@@ -19,13 +19,13 @@ PARAM_KEYS = (
PARAM_SHOW_DELETED, PARAM_SHOW_NESTED, PARAM_EXISTING,
PARAM_CLEAR_PARAMETERS, PARAM_GLOBAL_TENANT, PARAM_LIMIT,
PARAM_NESTED_DEPTH, PARAM_TAGS, PARAM_SHOW_HIDDEN, PARAM_TAGS_ANY,
- PARAM_NOT_TAGS, PARAM_NOT_TAGS_ANY, TEMPLATE_TYPE,
+ PARAM_NOT_TAGS, PARAM_NOT_TAGS_ANY, TEMPLATE_TYPE, PARAM_WITH_DETAIL
) = (
'timeout_mins', 'disable_rollback', 'adopt_stack_data',
'show_deleted', 'show_nested', 'existing',
'clear_parameters', 'global_tenant', 'limit',
'nested_depth', 'tags', 'show_hidden', 'tags_any',
- 'not_tags', 'not_tags_any', 'template_type',
+ 'not_tags', 'not_tags_any', 'template_type', 'with_detail',
)
STACK_KEYS = (
diff --git a/heat/rpc/client.py b/heat/rpc/client.py
index 911e2dbc3..6c5df7441 100644
--- a/heat/rpc/client.py
+++ b/heat/rpc/client.py
@@ -32,6 +32,7 @@ class EngineClient(object):
1.9 - Add template_type option to generate_template()
1.10 - Add support for software config list
1.11 - Add support for template versions list
+ 1.12 - Add with_detail option for stack resources list
'''
BASE_RPC_API_VERSION = '1.0'
@@ -421,16 +422,21 @@ class EngineClient(object):
stack_identity=stack_identity,
resource_name=resource_name))
- def list_stack_resources(self, ctxt, stack_identity, nested_depth=0):
+ def list_stack_resources(self, ctxt, stack_identity,
+ nested_depth=0, with_detail=False):
"""
List the resources belonging to a stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param nested_depth: Levels of nested stacks of which list resources.
+        :param with_detail: show detail for resources in the list.
"""
- return self.call(ctxt, self.make_msg('list_stack_resources',
- stack_identity=stack_identity,
- nested_depth=nested_depth))
+ return self.call(ctxt,
+ self.make_msg('list_stack_resources',
+ stack_identity=stack_identity,
+ nested_depth=nested_depth,
+ with_detail=with_detail),
+ version='1.12')
def stack_suspend(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_suspend',
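
The client change above pairs with the PARAM_WITH_DETAIL key added to heat/rpc/api.py and the ResourceController tests further down; pinning the message to version '1.12' keeps the new keyword away from engines that do not understand it. A hedged usage sketch -- self.rpc_client, req, and stack_identity are placeholder names, not part of this patch:

    # Inside an API controller method: ask for detailed resource records,
    # which only an engine speaking RPC API 1.12 or later will honour.
    rsrcs = self.rpc_client.list_stack_resources(req.context,
                                                 stack_identity,
                                                 nested_depth=0,
                                                 with_detail=True)
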
diff --git a/heat/scaling/cooldown.py b/heat/scaling/cooldown.py
index 75a55b9bb..bf7c82fb0 100644
--- a/heat/scaling/cooldown.py
+++ b/heat/scaling/cooldown.py
@@ -18,7 +18,8 @@ import six
class CooldownMixin(object):
'''
Utility class to encapsulate Cooldown related logic which is shared
- between AutoScalingGroup and ScalingPolicy
+    between AutoScalingGroup and ScalingPolicy. This logic covers both
+    cooldown timestamp comparison and checking whether scaling is in progress.
'''
def _cooldown_inprogress(self):
inprogress = False
@@ -30,16 +31,33 @@ class CooldownMixin(object):
cooldown = 0
metadata = self.metadata_get()
- if metadata and cooldown != 0:
- last_adjust = next(six.iterkeys(metadata))
+ if metadata.get('scaling_in_progress'):
+ return True
+
+ if 'cooldown' not in metadata:
+            # Note: this preserves cooldown checking for the legacy
+            # metadata format that has no 'cooldown' key
+ if metadata and cooldown != 0:
+ last_adjust = next(six.iterkeys(metadata))
+ if not timeutils.is_older_than(last_adjust, cooldown):
+ inprogress = True
+ elif cooldown != 0:
+ last_adjust = next(six.iterkeys(metadata['cooldown']))
if not timeutils.is_older_than(last_adjust, cooldown):
inprogress = True
+
+ if not inprogress:
+ metadata['scaling_in_progress'] = True
+ self.metadata_set(metadata)
+
return inprogress
def _cooldown_timestamp(self, reason):
- # Save resource metadata with a timestamp and reason
+        # Save the cooldown timestamp into metadata and clear the
+        # scaling_in_progress flag.
# If we wanted to implement the AutoScaling API like AWS does,
# we could maintain event history here, but since we only need
# the latest event for cooldown, just store that for now
- metadata = {timeutils.utcnow().isoformat(): reason}
+ metadata = self.metadata_get()
+ metadata['cooldown'] = {timeutils.utcnow().isoformat(): reason}
+ metadata['scaling_in_progress'] = False
self.metadata_set(metadata)
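
With this change the mixin keys the cooldown timestamp under a dedicated 'cooldown' entry and tracks an explicit scaling_in_progress flag, falling back to the old flat timestamp layout when the new key is absent. Purely illustrative (timestamp and reason values invented), the metadata shape that _cooldown_timestamp() now persists and that the updated tests below assert against:

    # A policy that has just finished scaling and entered its cooldown window.
    metadata = {
        'cooldown': {'2015-07-08T06:02:00.000000': 'ChangeInCapacity : 1'},
        'scaling_in_progress': False,
    }
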
diff --git a/heat/tests/api/__init__.py b/heat/tests/api/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/__init__.py
diff --git a/heat/tests/api/aws/__init__.py b/heat/tests/api/aws/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/aws/__init__.py
diff --git a/heat/tests/test_api_aws.py b/heat/tests/api/aws/test_api_aws.py
index 98d199f4f..98d199f4f 100644
--- a/heat/tests/test_api_aws.py
+++ b/heat/tests/api/aws/test_api_aws.py
diff --git a/heat/tests/test_api_ec2token.py b/heat/tests/api/aws/test_api_ec2token.py
index 006dc1f92..006dc1f92 100644
--- a/heat/tests/test_api_ec2token.py
+++ b/heat/tests/api/aws/test_api_ec2token.py
diff --git a/heat/tests/api/cfn/__init__.py b/heat/tests/api/cfn/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/cfn/__init__.py
diff --git a/heat/tests/test_api_cfn_v1.py b/heat/tests/api/cfn/test_api_cfn_v1.py
index d84d27f63..619179027 100644
--- a/heat/tests/test_api_cfn_v1.py
+++ b/heat/tests/api/cfn/test_api_cfn_v1.py
@@ -29,7 +29,7 @@ from heat.rpc import client as rpc_client
from heat.tests import common
from heat.tests import utils
-policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
+policy_path = os.path.dirname(os.path.realpath(__file__)) + "/../../policy/"
class CfnStackControllerTest(common.HeatTestCase):
@@ -41,12 +41,12 @@ class CfnStackControllerTest(common.HeatTestCase):
def setUp(self):
super(CfnStackControllerTest, self).setUp()
- opts = [
+ self.opts = [
cfg.StrOpt('config_dir', default=policy_path),
cfg.StrOpt('config_file', default='foo'),
cfg.StrOpt('project', default='heat'),
]
- cfg.CONF.register_opts(opts)
+ cfg.CONF.register_opts(self.opts)
cfg.CONF.set_default('host', 'host')
self.topic = rpc_api.ENGINE_TOPIC
self.api_version = '1.0'
@@ -62,6 +62,10 @@ class CfnStackControllerTest(common.HeatTestCase):
'deny_stack_user.json')
self.addCleanup(self.m.VerifyAll)
+ def tearDown(self):
+ super(CfnStackControllerTest, self).tearDown()
+ cfg.CONF.unregister_opts(self.opts)
+
def _dummy_GET_request(self, params=None):
# Mangle the params dict into a query string
params = params or {}
@@ -1594,7 +1598,9 @@ class CfnStackControllerTest(common.HeatTestCase):
rpc_client.EngineClient.call(
dummy_req.context,
('list_stack_resources', {'stack_identity': identity,
- 'nested_depth': 0})
+ 'nested_depth': 0,
+ 'with_detail': False}),
+ version='1.12'
).AndReturn(engine_resp)
self.m.ReplayAll()
diff --git a/heat/tests/api/cloudwatch/__init__.py b/heat/tests/api/cloudwatch/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/cloudwatch/__init__.py
diff --git a/heat/tests/test_api_cloudwatch.py b/heat/tests/api/cloudwatch/test_api_cloudwatch.py
index f7e7988cc..8cc85dab7 100644
--- a/heat/tests/test_api_cloudwatch.py
+++ b/heat/tests/api/cloudwatch/test_api_cloudwatch.py
@@ -32,6 +32,33 @@ class WatchControllerTest(common.HeatTestCase):
the endpoint processing API requests after they are routed
'''
+ def setUp(self):
+ super(WatchControllerTest, self).setUp()
+ self.path = os.path.dirname(os.path.realpath(__file__))
+ self.policy_path = self.path + "/../../policy/"
+ self.opts = [
+ cfg.StrOpt('config_dir', default=self.policy_path),
+ cfg.StrOpt('config_file', default='foo'),
+ cfg.StrOpt('project', default='heat'),
+ ]
+ cfg.CONF.register_opts(self.opts)
+ cfg.CONF.set_default('host', 'host')
+ self.topic = rpc_api.ENGINE_TOPIC
+ self.api_version = '1.0'
+
+ # Create WSGI controller instance
+ class DummyConfig(object):
+ bind_port = 8003
+ cfgopts = DummyConfig()
+ self.controller = watches.WatchController(options=cfgopts)
+ self.controller.policy.enforcer.policy_path = (self.policy_path +
+ 'deny_stack_user.json')
+ self.addCleanup(self.m.VerifyAll)
+
+ def tearDown(self):
+ super(WatchControllerTest, self).tearDown()
+ cfg.CONF.unregister_opts(self.opts)
+
def _dummy_GET_request(self, params=None):
# Mangle the params dict into a query string
params = params or {}
@@ -488,26 +515,3 @@ class WatchControllerTest(common.HeatTestCase):
# should raise HeatInvalidParameterValueError
result = self.controller.set_alarm_state(dummy_req)
self.assertIsInstance(result, exception.HeatInvalidParameterValueError)
-
- def setUp(self):
- super(WatchControllerTest, self).setUp()
- self.path = os.path.dirname(os.path.realpath(__file__))
- self.policy_path = self.path + "/policy/"
- opts = [
- cfg.StrOpt('config_dir', default=self.policy_path),
- cfg.StrOpt('config_file', default='foo'),
- cfg.StrOpt('project', default='heat'),
- ]
- cfg.CONF.register_opts(opts)
- cfg.CONF.set_default('host', 'host')
- self.topic = rpc_api.ENGINE_TOPIC
- self.api_version = '1.0'
-
- # Create WSGI controller instance
- class DummyConfig(object):
- bind_port = 8003
- cfgopts = DummyConfig()
- self.controller = watches.WatchController(options=cfgopts)
- self.controller.policy.enforcer.policy_path = (self.policy_path +
- 'deny_stack_user.json')
- self.addCleanup(self.m.VerifyAll)
diff --git a/heat/tests/api/middleware/__init__.py b/heat/tests/api/middleware/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/middleware/__init__.py
diff --git a/heat/tests/test_ssl_middleware.py b/heat/tests/api/middleware/test_ssl_middleware.py
index 5700f683b..5700f683b 100644
--- a/heat/tests/test_ssl_middleware.py
+++ b/heat/tests/api/middleware/test_ssl_middleware.py
diff --git a/heat/tests/test_version_negotiation_middleware.py b/heat/tests/api/middleware/test_version_negotiation_middleware.py
index b720d9601..b720d9601 100644
--- a/heat/tests/test_version_negotiation_middleware.py
+++ b/heat/tests/api/middleware/test_version_negotiation_middleware.py
diff --git a/heat/tests/api/openstack/__init__.py b/heat/tests/api/openstack/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/api/openstack/__init__.py
diff --git a/heat/tests/test_api_openstack_v1.py b/heat/tests/api/openstack/test_api_openstack_v1.py
index 9e68b1063..6ee70b87c 100644
--- a/heat/tests/test_api_openstack_v1.py
+++ b/heat/tests/api/openstack/test_api_openstack_v1.py
@@ -773,6 +773,50 @@ class StackControllerTest(ControllerTest, common.HeatTestCase):
self.m.VerifyAll()
+ def test_create_with_tags(self, mock_enforce):
+ self._mock_enforce_setup(mock_enforce, 'create', True)
+ identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
+ template = {u'Foo': u'bar'}
+ parameters = {u'InstanceType': u'm1.xlarge'}
+ body = {'template': template,
+ 'stack_name': identity.stack_name,
+ 'parameters': parameters,
+ 'tags': 'tag1,tag2',
+ 'timeout_mins': 30}
+
+ req = self._post('/stacks', json.dumps(body))
+
+ self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+ rpc_client.EngineClient.call(
+ req.context,
+ ('create_stack',
+ {'stack_name': identity.stack_name,
+ 'template': template,
+ 'params': {'parameters': parameters,
+ 'encrypted_param_names': [],
+ 'parameter_defaults': {},
+ 'resource_registry': {}},
+ 'files': {},
+ 'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
+ 'owner_id': None,
+ 'nested_depth': 0,
+ 'user_creds_id': None,
+ 'parent_resource_name': None,
+ 'stack_user_project_id': None}),
+ version='1.8'
+ ).AndReturn(dict(identity))
+ self.m.ReplayAll()
+
+ response = self.controller.create(req,
+ tenant_id=identity.tenant,
+ body=body)
+
+ expected = {'stack':
+ {'id': '1',
+ 'links': [{'href': self._url(identity), 'rel': 'self'}]}}
+ self.assertEqual(expected, response)
+ self.m.VerifyAll()
+
def test_adopt(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
@@ -1535,6 +1579,43 @@ class StackControllerTest(ControllerTest, common.HeatTestCase):
body=body)
self.m.VerifyAll()
+ def test_update_with_tags(self, mock_enforce):
+ self._mock_enforce_setup(mock_enforce, 'update', True)
+ identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
+ template = {u'Foo': u'bar'}
+ parameters = {u'InstanceType': u'm1.xlarge'}
+ body = {'template': template,
+ 'parameters': parameters,
+ 'files': {},
+ 'tags': 'tag1,tag2',
+ 'timeout_mins': 30}
+
+ req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
+ json.dumps(body))
+
+ self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+ rpc_client.EngineClient.call(
+ req.context,
+ ('update_stack',
+ {'stack_identity': dict(identity),
+ 'template': template,
+ 'params': {'parameters': parameters,
+ 'encrypted_param_names': [],
+ 'parameter_defaults': {},
+ 'resource_registry': {}},
+ 'files': {},
+ 'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
+ ).AndReturn(dict(identity))
+ self.m.ReplayAll()
+
+ self.assertRaises(webob.exc.HTTPAccepted,
+ self.controller.update,
+ req, tenant_id=identity.tenant,
+ stack_name=identity.stack_name,
+ stack_id=identity.stack_id,
+ body=body)
+ self.m.VerifyAll()
+
def test_update_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
@@ -1658,6 +1739,44 @@ class StackControllerTest(ControllerTest, common.HeatTestCase):
body=body)
self.m.VerifyAll()
+ def test_update_with_existing_parameters_with_tags(self, mock_enforce):
+ self._mock_enforce_setup(mock_enforce, 'update_patch', True)
+ identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
+ template = {u'Foo': u'bar'}
+ body = {'template': template,
+ 'parameters': {},
+ 'files': {},
+ 'tags': 'tag1,tag2',
+ 'timeout_mins': 30}
+
+ req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
+ json.dumps(body))
+
+ self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+ rpc_client.EngineClient.call(
+ req.context,
+ ('update_stack',
+ {'stack_identity': dict(identity),
+ 'template': template,
+ 'params': {'parameters': {},
+ 'encrypted_param_names': [],
+ 'parameter_defaults': {},
+ 'resource_registry': {}},
+ 'files': {},
+ 'args': {rpc_api.PARAM_EXISTING: True,
+ 'timeout_mins': 30,
+ 'tags': ['tag1', 'tag2']}})
+ ).AndReturn(dict(identity))
+ self.m.ReplayAll()
+
+ self.assertRaises(webob.exc.HTTPAccepted,
+ self.controller.update_patch,
+ req, tenant_id=identity.tenant,
+ stack_name=identity.stack_name,
+ stack_id=identity.stack_id,
+ body=body)
+ self.m.VerifyAll()
+
def test_update_with_patched_existing_parameters(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
@@ -2237,7 +2356,10 @@ class ResourceControllerTest(ControllerTest, common.HeatTestCase):
rpc_client.EngineClient.call(
req.context,
('list_stack_resources', {'stack_identity': stack_identity,
- 'nested_depth': 0})
+ 'nested_depth': 0,
+ 'with_detail': False,
+ }),
+ version='1.12'
).AndReturn(engine_resp)
self.m.ReplayAll()
@@ -2274,13 +2396,16 @@ class ResourceControllerTest(ControllerTest, common.HeatTestCase):
rpc_client.EngineClient.call(
req.context,
('list_stack_resources', {'stack_identity': stack_identity,
- 'nested_depth': 0})
+ 'nested_depth': 0,
+ 'with_detail': False}),
+ version='1.12'
).AndRaise(to_remote_error(error))
self.m.ReplayAll()
resp = request_with_middleware(fault.FaultWrapper,
self.controller.index,
- req, tenant_id=self.tenant,
+ req,
+ tenant_id=self.tenant,
stack_name=stack_identity.stack_name,
stack_id=stack_identity.stack_id)
@@ -2300,7 +2425,9 @@ class ResourceControllerTest(ControllerTest, common.HeatTestCase):
rpc_client.EngineClient.call(
req.context,
('list_stack_resources', {'stack_identity': stack_identity,
- 'nested_depth': 99})
+ 'nested_depth': 99,
+ 'with_detail': False}),
+ version='1.12'
).AndReturn([])
self.m.ReplayAll()
@@ -2348,6 +2475,80 @@ class ResourceControllerTest(ControllerTest, common.HeatTestCase):
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
+ def test_index_detail(self, mock_enforce):
+ self._mock_enforce_setup(mock_enforce, 'index', True)
+ res_name = 'WikiDatabase'
+ stack_identity = identifier.HeatIdentifier(self.tenant,
+ 'wordpress', '1')
+ res_identity = identifier.ResourceIdentifier(resource_name=res_name,
+ **stack_identity)
+
+ req = self._get(stack_identity._tenant_path() + '/resources',
+ {'with_detail': 'true'})
+
+ resp_parameters = {
+ "OS::project_id": "3ab5b02fa01f4f95afa1e254afc4a435",
+ "network": "cf05086d-07c7-4ed6-95e5-e4af724677e6",
+ "OS::stack_name": "s1", "admin_pass": "******",
+ "key_name": "kk", "image": "fa5d387e-541f-4dfb-ae8a-83a614683f84",
+ "db_port": "50000",
+ "OS::stack_id": "723d7cee-46b3-4433-9c21-f3378eb0bfc4",
+ "flavor": "1"
+        }
+
+ engine_resp = [
+ {
+ u'resource_identity': dict(res_identity),
+ u'stack_name': stack_identity.stack_name,
+ u'resource_name': res_name,
+ u'resource_status_reason': None,
+ u'updated_time': u'2012-07-23T13:06:00Z',
+ u'stack_identity': stack_identity,
+ u'resource_action': u'CREATE',
+ u'resource_status': u'COMPLETE',
+ u'physical_resource_id':
+ u'a3455d8c-9f88-404d-a85b-5315293e67de',
+ u'resource_type': u'AWS::EC2::Instance',
+ u'parameters': resp_parameters,
+ u'description': u'Hello description',
+ u'stack_user_project_id': u'6f38bcfebbc4400b82d50c1a2ea3057d',
+ }
+ ]
+ self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
+ rpc_client.EngineClient.call(
+ req.context,
+ ('list_stack_resources', {'stack_identity': stack_identity,
+ 'nested_depth': 0,
+ 'with_detail': True}),
+ version='1.12'
+ ).AndReturn(engine_resp)
+ self.m.ReplayAll()
+
+ result = self.controller.index(req, tenant_id=self.tenant,
+ stack_name=stack_identity.stack_name,
+ stack_id=stack_identity.stack_id)
+
+ expected = {
+ 'resources': [{'links': [{'href': self._url(res_identity),
+ 'rel': 'self'},
+ {'href': self._url(stack_identity),
+ 'rel': 'stack'}],
+ u'resource_name': res_name,
+ u'logical_resource_id': res_name,
+ u'resource_status_reason': None,
+ u'updated_time': u'2012-07-23T13:06:00Z',
+ u'resource_status': u'CREATE_COMPLETE',
+ u'physical_resource_id':
+ u'a3455d8c-9f88-404d-a85b-5315293e67de',
+ u'resource_type': u'AWS::EC2::Instance',
+ u'parameters': resp_parameters,
+ u'description': u'Hello description',
+ u'stack_user_project_id':
+ u'6f38bcfebbc4400b82d50c1a2ea3057d'}]}
+
+ self.assertEqual(expected, result)
+ self.m.VerifyAll()
+
def test_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
res_name = 'WikiDatabase'
diff --git a/heat/tests/test_api_openstack_v1_util.py b/heat/tests/api/openstack/test_api_openstack_v1_util.py
index 9c832c317..9c832c317 100644
--- a/heat/tests/test_api_openstack_v1_util.py
+++ b/heat/tests/api/openstack/test_api_openstack_v1_util.py
diff --git a/heat/tests/test_api_openstack_v1_views_stacks_view_builder.py b/heat/tests/api/openstack/test_api_openstack_v1_views_stacks_view_builder.py
index cbf904830..cbf904830 100644
--- a/heat/tests/test_api_openstack_v1_views_stacks_view_builder.py
+++ b/heat/tests/api/openstack/test_api_openstack_v1_views_stacks_view_builder.py
diff --git a/heat/tests/test_api_openstack_v1_views_views_common.py b/heat/tests/api/openstack/test_api_openstack_v1_views_views_common.py
index 981dcc542..981dcc542 100644
--- a/heat/tests/test_api_openstack_v1_views_views_common.py
+++ b/heat/tests/api/openstack/test_api_openstack_v1_views_views_common.py
diff --git a/heat/tests/test_wsgi.py b/heat/tests/api/test_wsgi.py
index df2a8a678..c7b32fe36 100644
--- a/heat/tests/test_wsgi.py
+++ b/heat/tests/api/test_wsgi.py
@@ -15,13 +15,16 @@
# under the License.
+import fixtures
import json
-
-from oslo_config import cfg
+import mock
import six
+import socket
import stubout
import webob
+from oslo_config import cfg
+
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common import wsgi
@@ -398,3 +401,77 @@ class JSONRequestDeserializerTest(common.HeatTestCase):
'(%s bytes) exceeds maximum allowed size (%s bytes).' % (
len(body), cfg.CONF.max_json_body_size))
self.assertEqual(msg, six.text_type(error))
+
+
+class GetSocketTestCase(common.HeatTestCase):
+
+ def setUp(self):
+ super(GetSocketTestCase, self).setUp()
+ self.useFixture(fixtures.MonkeyPatch(
+ "heat.common.wsgi.get_bind_addr",
+ lambda x, y: ('192.168.0.13', 1234)))
+ addr_info_list = [(2, 1, 6, '', ('192.168.0.13', 80)),
+ (2, 2, 17, '', ('192.168.0.13', 80)),
+ (2, 3, 0, '', ('192.168.0.13', 80))]
+ self.useFixture(fixtures.MonkeyPatch(
+ "heat.common.wsgi.socket.getaddrinfo",
+ lambda *x: addr_info_list))
+ self.useFixture(fixtures.MonkeyPatch(
+ "heat.common.wsgi.time.time",
+ mock.Mock(side_effect=[0, 1, 5, 10, 20, 35])))
+ wsgi.cfg.CONF.heat_api.cert_file = '/etc/ssl/cert'
+ wsgi.cfg.CONF.heat_api.key_file = '/etc/ssl/key'
+ wsgi.cfg.CONF.heat_api.ca_file = '/etc/ssl/ca_cert'
+ wsgi.cfg.CONF.heat_api.tcp_keepidle = 600
+
+ def test_correct_configure_socket(self):
+ mock_socket = mock.Mock()
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.ssl.wrap_socket',
+ mock_socket))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.eventlet.listen',
+ lambda *x, **y: mock_socket))
+ server = wsgi.Server(name='heat-api', conf=cfg.CONF.heat_api)
+ server.default_port = 1234
+ server.configure_socket()
+ self.assertIn(mock.call.setsockopt(
+ socket.SOL_SOCKET,
+ socket.SO_REUSEADDR,
+ 1), mock_socket.mock_calls)
+ self.assertIn(mock.call.setsockopt(
+ socket.SOL_SOCKET,
+ socket.SO_KEEPALIVE,
+ 1), mock_socket.mock_calls)
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ self.assertIn(mock.call().setsockopt(
+ socket.IPPROTO_TCP,
+ socket.TCP_KEEPIDLE,
+ wsgi.cfg.CONF.heat_api.tcp_keepidle), mock_socket.mock_calls)
+
+ def test_get_socket_without_all_ssl_reqs(self):
+ wsgi.cfg.CONF.heat_api.key_file = None
+ self.assertRaises(RuntimeError, wsgi.get_socket,
+ wsgi.cfg.CONF.heat_api, 1234)
+
+ def test_get_socket_with_bind_problems(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.eventlet.listen',
+ mock.Mock(side_effect=(
+ [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.ssl.wrap_socket',
+ lambda *x, **y: None))
+
+ self.assertRaises(RuntimeError, wsgi.get_socket,
+ wsgi.cfg.CONF.heat_api, 1234)
+
+ def test_get_socket_with_unexpected_socket_errno(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.eventlet.listen',
+ mock.Mock(side_effect=wsgi.socket.error(socket.errno.ENOMEM))))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'heat.common.wsgi.ssl.wrap_socket',
+ lambda *x, **y: None))
+ self.assertRaises(wsgi.socket.error, wsgi.get_socket,
+ wsgi.cfg.CONF.heat_api, 1234)
diff --git a/heat/tests/autoscaling/test_heat_scaling_group.py b/heat/tests/autoscaling/test_heat_scaling_group.py
index 35d548ea7..2e8214374 100644
--- a/heat/tests/autoscaling/test_heat_scaling_group.py
+++ b/heat/tests/autoscaling/test_heat_scaling_group.py
@@ -247,6 +247,7 @@ class TestGroupAdjust(common.HeatTestCase):
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
+ self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust, 1)
expected_notifies = [
@@ -276,6 +277,7 @@ class TestGroupAdjust(common.HeatTestCase):
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
+ self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust,
5, adjustment_type='ExactCapacity')
diff --git a/heat/tests/autoscaling/test_heat_scaling_policy.py b/heat/tests/autoscaling/test_heat_scaling_policy.py
index 38589d3a1..68d082a56 100644
--- a/heat/tests/autoscaling/test_heat_scaling_policy.py
+++ b/heat/tests/autoscaling/test_heat_scaling_policy.py
@@ -130,23 +130,38 @@ class TestCooldownMixin(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
- def test_is_in_progress(self):
+ def test_cooldown_is_in_progress_toosoon(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'my-policy')
now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'ChangeInCapacity : 1'}}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertTrue(pol._cooldown_inprogress())
- def test_not_in_progress(self):
+ def test_cooldown_is_in_progress_scaling_unfinished(self):
+ t = template_format.parse(as_template)
+ stack = utils.parse_stack(t, params=as_params)
+ pol = self.create_scaling_policy(t, stack, 'my-policy')
+
+ previous_meta = {'scaling_in_progress': True}
+ self.patchobject(pol, 'metadata_get', return_value=previous_meta)
+ self.assertTrue(pol._cooldown_inprogress())
+
+ def test_cooldown_not_in_progress(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'my-policy')
awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
- previous_meta = {awhile_ago.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {
+ 'cooldown': {
+ awhile_ago.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._cooldown_inprogress())
@@ -161,7 +176,8 @@ class TestCooldownMixin(common.HeatTestCase):
pol = self.create_scaling_policy(t, stack, 'my-policy')
now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'ChangeInCapacity : 1'}}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._cooldown_inprogress())
@@ -177,7 +193,8 @@ class TestCooldownMixin(common.HeatTestCase):
pol = self.create_scaling_policy(t, stack, 'my-policy')
now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'ChangeInCapacity : 1'}}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._cooldown_inprogress())
@@ -191,7 +208,9 @@ class TestCooldownMixin(common.HeatTestCase):
meta_set = self.patchobject(pol, 'metadata_set')
self.patchobject(timeutils, 'utcnow', return_value=nowish)
pol._cooldown_timestamp(reason)
- meta_set.assert_called_once_with({nowish.isoformat(): reason})
+ meta_set.assert_called_once_with(
+ {'cooldown': {nowish.isoformat(): reason},
+ 'scaling_in_progress': False})
class ScalingPolicyAttrTest(common.HeatTestCase):
diff --git a/heat/tests/autoscaling/test_scaling_group.py b/heat/tests/autoscaling/test_scaling_group.py
index 890179fb0..4c238763f 100644
--- a/heat/tests/autoscaling/test_scaling_group.py
+++ b/heat/tests/autoscaling/test_scaling_group.py
@@ -434,6 +434,7 @@ class TestGroupAdjust(common.HeatTestCase):
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
+ self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust, 1)
expected_notifies = [
@@ -463,6 +464,7 @@ class TestGroupAdjust(common.HeatTestCase):
notify = self.patch('heat.engine.notification.autoscaling.send')
self.patchobject(self.group, '_cooldown_inprogress',
return_value=False)
+ self.patchobject(self.group, '_cooldown_timestamp')
self.assertRaises(ValueError, self.group.adjust,
5, adjustment_type='ExactCapacity')
diff --git a/heat/tests/autoscaling/test_scaling_policy.py b/heat/tests/autoscaling/test_scaling_policy.py
index 8476024a8..f586cbea6 100644
--- a/heat/tests/autoscaling/test_scaling_policy.py
+++ b/heat/tests/autoscaling/test_scaling_policy.py
@@ -135,23 +135,38 @@ class TestCooldownMixin(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
- def test_is_in_progress(self):
+ def test_cooldown_is_in_progress_toosoon(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
now = timeutils.utcnow()
- previous_meta = {now.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {'cooldown': {
+ now.isoformat(): 'ChangeInCapacity : 1'}}
+ self.patchobject(pol, 'metadata_get', return_value=previous_meta)
+ self.assertTrue(pol._cooldown_inprogress())
+
+ def test_cooldown_is_in_progress_scaling_unfinished(self):
+ t = template_format.parse(as_template)
+ stack = utils.parse_stack(t, params=as_params)
+ pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
+
+ previous_meta = {'scaling_in_progress': True}
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertTrue(pol._cooldown_inprogress())
- def test_not_in_progress(self):
+ def test_cooldown_not_in_progress(self):
t = template_format.parse(as_template)
stack = utils.parse_stack(t, params=as_params)
pol = self.create_scaling_policy(t, stack, 'WebServerScaleUpPolicy')
awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
- previous_meta = {awhile_ago.isoformat(): 'ChangeInCapacity : 1'}
+ previous_meta = {
+ 'cooldown': {
+ awhile_ago.isoformat(): 'ChangeInCapacity : 1'
+ },
+ 'scaling_in_progress': False
+ }
self.patchobject(pol, 'metadata_get', return_value=previous_meta)
self.assertFalse(pol._cooldown_inprogress())
@@ -196,7 +211,9 @@ class TestCooldownMixin(common.HeatTestCase):
meta_set = self.patchobject(pol, 'metadata_set')
self.patchobject(timeutils, 'utcnow', return_value=nowish)
pol._cooldown_timestamp(reason)
- meta_set.assert_called_once_with({nowish.isoformat(): reason})
+ meta_set.assert_called_once_with(
+ {'cooldown': {nowish.isoformat(): reason},
+ 'scaling_in_progress': False})
class ScalingPolicyAttrTest(common.HeatTestCase):
diff --git a/heat/tests/aws/test_instance.py b/heat/tests/aws/test_instance.py
index 5bdbcb4a4..bdf7a84eb 100644
--- a/heat/tests/aws/test_instance.py
+++ b/heat/tests/aws/test_instance.py
@@ -701,9 +701,9 @@ class InstancesTest(common.HeatTestCase):
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
- def activate_status(server):
- server.status = 'ACTIVE'
- return_server.get = activate_status.__get__(return_server)
+ def fail_status(server):
+ server.status = 'ERROR'
+ return_server.get = fail_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@@ -714,7 +714,7 @@ class InstancesTest(common.HeatTestCase):
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: resources.ud_type_f: "
- "Resizing to 'm1.small' failed, status 'ACTIVE'",
+ "Resizing to 'm1.small' failed, status 'ERROR'",
six.text_type(error))
self.assertEqual((instance.UPDATE, instance.FAILED), instance.state)
self.m.VerifyAll()
@@ -1013,16 +1013,14 @@ class InstancesTest(common.HeatTestCase):
scheduler.TaskRunner(instance.create)()
self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
- def test_instance_status_suspend(self):
+ def _test_instance_status_suspend(self, name,
+ state=('CREATE', 'COMPLETE')):
return_server = self.fc.servers.list()[1]
- instance = self._create_test_instance(return_server,
- 'in_suspend_wait')
+ instance = self._create_test_instance(return_server, name)
instance.resource_id = '1234'
- self.m.ReplayAll()
+ instance.state_set(state[0], state[1])
- # Override the get_servers_1234 handler status to SUSPENDED, but
- # return the ACTIVE state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
@@ -1039,16 +1037,28 @@ class InstancesTest(common.HeatTestCase):
self.m.VerifyAll()
- def test_instance_status_resume(self):
+ def test_instance_suspend_in_create_complete(self):
+ self._test_instance_status_suspend(
+ name='test_suspend_in_create_complete')
+
+ def test_instance_suspend_in_suspend_failed(self):
+ self._test_instance_status_suspend(
+ name='test_suspend_in_suspend_failed',
+ state=('SUSPEND', 'FAILED'))
+
+ def test_server_suspend_in_suspend_complete(self):
+ self._test_instance_status_suspend(
+ name='test_suspend_in_suspend_complete',
+ state=('SUSPEND', 'COMPLETE'))
+
+ def _test_instance_status_resume(self, name,
+ state=('SUSPEND', 'COMPLETE')):
return_server = self.fc.servers.list()[1]
- instance = self._create_test_instance(return_server,
- 'in_resume_wait')
+ instance = self._create_test_instance(return_server, name)
instance.resource_id = '1234'
- self.m.ReplayAll()
+ instance.state_set(state[0], state[1])
- # Override the get_servers_1234 handler status to ACTIVE, but
- # return the SUSPENDED state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'SUSPENDED'
@@ -1067,6 +1077,20 @@ class InstancesTest(common.HeatTestCase):
self.m.VerifyAll()
+ def test_instance_resume_in_suspend_complete(self):
+ self._test_instance_status_resume(
+ name='test_resume_in_suspend_complete')
+
+ def test_instance_resume_in_resume_failed(self):
+ self._test_instance_status_resume(
+ name='test_resume_in_resume_failed',
+ state=('RESUME', 'FAILED'))
+
+ def test_instance_resume_in_resume_complete(self):
+ self._test_instance_status_resume(
+ name='test_resume_in_resume_complete',
+ state=('RESUME', 'COMPLETE'))
+
def test_server_resume_other_exception(self):
return_server = self.fc.servers.list()[1]
instance = self._create_test_instance(return_server,
diff --git a/heat/tests/aws/test_volume.py b/heat/tests/aws/test_volume.py
index 721827f0a..adacafd62 100644
--- a/heat/tests/aws/test_volume.py
+++ b/heat/tests/aws/test_volume.py
@@ -27,8 +27,8 @@ from heat.engine.resources.aws.ec2 import instance
from heat.engine.resources.aws.ec2 import volume as aws_vol
from heat.engine import rsrc_defn
from heat.engine import scheduler
+from heat.tests.cinder import test_volume_utils as vt_base
from heat.tests.nova import fakes as fakes_nova
-from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
diff --git a/heat/tests/ceilometer/__init__.py b/heat/tests/ceilometer/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/ceilometer/__init__.py
diff --git a/heat/tests/test_ceilometer_alarm.py b/heat/tests/ceilometer/test_ceilometer_alarm.py
index 64fe0fcb4..64fe0fcb4 100644
--- a/heat/tests/test_ceilometer_alarm.py
+++ b/heat/tests/ceilometer/test_ceilometer_alarm.py
diff --git a/heat/tests/test_gnocchi_alarm.py b/heat/tests/ceilometer/test_gnocchi_alarm.py
index ab0991190..ab0991190 100644
--- a/heat/tests/test_gnocchi_alarm.py
+++ b/heat/tests/ceilometer/test_gnocchi_alarm.py
diff --git a/heat/tests/cinder/__init__.py b/heat/tests/cinder/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/cinder/__init__.py
diff --git a/heat/tests/test_cinder_volume_type.py b/heat/tests/cinder/test_cinder_volume_type.py
index 74fadeebe..74fadeebe 100644
--- a/heat/tests/test_cinder_volume_type.py
+++ b/heat/tests/cinder/test_cinder_volume_type.py
diff --git a/heat/tests/openstack/test_volume.py b/heat/tests/cinder/test_volume.py
index e749cce13..09ca39289 100644
--- a/heat/tests/openstack/test_volume.py
+++ b/heat/tests/cinder/test_volume.py
@@ -26,8 +26,8 @@ from heat.engine.resources.openstack.cinder import volume as c_vol
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
+from heat.tests.cinder import test_volume_utils as vt_base
from heat.tests.nova import fakes as fakes_nova
-from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
cinder_volume_template = '''
diff --git a/heat/tests/cinder/test_volume_type_encryption.py b/heat/tests/cinder/test_volume_type_encryption.py
new file mode 100644
index 000000000..6188c41e0
--- /dev/null
+++ b/heat/tests/cinder/test_volume_type_encryption.py
@@ -0,0 +1,117 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from heat.engine.resources.openstack.cinder import cinder_encrypted_vol_type
+from heat.engine import stack
+from heat.engine import template
+from heat.tests import common
+from heat.tests import utils
+
+cinder_volume_type_encryption = {
+ 'heat_template_version': '2015-04-30',
+ 'resources': {
+ 'my_encrypted_vol_type': {
+ 'type': 'OS::Cinder::EncryptedVolumeType',
+ 'properties': {
+ 'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
+ 'control_location': 'front-end',
+ 'cipher': 'aes-xts-plain64',
+ 'key_size': '512',
+ 'volume_type': '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
+ }
+ }
+ }
+}
+
+
+class CinderEncryptedVolumeTypeTest(common.HeatTestCase):
+ def setUp(self):
+ super(CinderEncryptedVolumeTypeTest, self).setUp()
+
+ self.ctx = utils.dummy_context()
+
+ self.stack = stack.Stack(
+ self.ctx, 'cinder_vol_type_encryption_test_stack',
+ template.Template(cinder_volume_type_encryption)
+ )
+
+ self.my_encrypted_vol_type = self.stack['my_encrypted_vol_type']
+ cinder = mock.MagicMock()
+ self.cinderclient = mock.MagicMock()
+ self.my_encrypted_vol_type.cinder = cinder
+ cinder.return_value = self.cinderclient
+ self.volume_encryption_types = \
+ self.cinderclient.volume_encryption_types
+
+ def test_resource_mapping(self):
+ mapping = cinder_encrypted_vol_type.resource_mapping()
+ self.assertEqual(1, len(mapping))
+ self.assertEqual(cinder_encrypted_vol_type.CinderEncryptedVolumeType,
+ mapping['OS::Cinder::EncryptedVolumeType'])
+ self.assertIsInstance(
+ self.my_encrypted_vol_type,
+ cinder_encrypted_vol_type.CinderEncryptedVolumeType
+ )
+
+ def test_handle_create(self):
+ value = mock.MagicMock()
+ volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
+ value.volume_type_id = volume_type_id
+ self.volume_encryption_types.create.return_value = value
+
+ with mock.patch.object(self.my_encrypted_vol_type.client_plugin(),
+ 'get_volume_type') as mock_get_volume_type:
+ mock_get_volume_type.return_value = volume_type_id
+ self.my_encrypted_vol_type.handle_create()
+ mock_get_volume_type.assert_called_once_with(volume_type_id)
+
+ specs = {
+ 'control_location': 'front-end',
+ 'cipher': 'aes-xts-plain64',
+ 'key_size': 512,
+ 'provider': 'nova.volume.encryptors.luks.LuksEncryptor'
+ }
+ self.volume_encryption_types.create.assert_called_once_with(
+ volume_type=volume_type_id, specs=specs)
+ self.assertEqual(volume_type_id,
+ self.my_encrypted_vol_type.resource_id)
+
+ def test_handle_update(self):
+ update_args = {
+ 'control_location': 'back-end',
+ 'key_size': 256,
+ 'cipher': 'aes-cbc-essiv',
+ 'provider':
+ 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
+ }
+ volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
+ self.my_encrypted_vol_type.resource_id = volume_type_id
+ self.my_encrypted_vol_type.handle_update(json_snippet=None,
+ tmpl_diff=None,
+ prop_diff=update_args)
+
+ self.volume_encryption_types.update.assert_called_once_with(
+ volume_type=volume_type_id, specs=update_args)
+
+ def test_handle_delete(self):
+ volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
+ self.my_encrypted_vol_type.resource_id = volume_type_id
+ self.volume_encryption_types.delete.return_value = None
+ self.assertIsNone(self.my_encrypted_vol_type.handle_delete())
+
+ def test_handle_delete_rsrc_not_found(self):
+ exc = self.cinderclient.HTTPClientError('Not Found.')
+ self.volume_encryption_types.delete.side_effect = exc
+ self.assertIsNone(self.my_encrypted_vol_type.handle_delete())
diff --git a/heat/tests/test_volume_utils.py b/heat/tests/cinder/test_volume_utils.py
index be4e2195f..be4e2195f 100644
--- a/heat/tests/test_volume_utils.py
+++ b/heat/tests/cinder/test_volume_utils.py
diff --git a/heat/tests/clients/__init__.py b/heat/tests/clients/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/clients/__init__.py
diff --git a/heat/tests/test_barbican_client.py b/heat/tests/clients/test_barbican_client.py
index 24d8c6efa..24d8c6efa 100644
--- a/heat/tests/test_barbican_client.py
+++ b/heat/tests/clients/test_barbican_client.py
diff --git a/heat/tests/test_cinder_client.py b/heat/tests/clients/test_cinder_client.py
index 02a22944f..02a22944f 100644
--- a/heat/tests/test_cinder_client.py
+++ b/heat/tests/clients/test_cinder_client.py
diff --git a/heat/tests/test_clients.py b/heat/tests/clients/test_clients.py
index a24c4b264..88159e5cc 100644
--- a/heat/tests/test_clients.py
+++ b/heat/tests/clients/test_clients.py
@@ -258,7 +258,6 @@ class ClientPluginTest(common.HeatTestCase):
# assert token is from plugin rather than context
# even though both are set
self.assertEqual('5678', plugin.auth_token)
- con.auth_plugin.get_token.assert_called()
def test_url_for(self):
con = mock.Mock()
@@ -275,7 +274,7 @@ class ClientPluginTest(common.HeatTestCase):
self.assertEqual('http://192.0.2.1/foo',
plugin.url_for(service_type='foo'))
- con.auth_plugin.get_endpoint.assert_called()
+ self.assertTrue(con.auth_plugin.get_endpoint.called)
@mock.patch.object(context, "RequestContext")
@mock.patch.object(v3, "Token", name="v3_token")
diff --git a/heat/tests/clients/test_designate_client.py b/heat/tests/clients/test_designate_client.py
new file mode 100644
index 000000000..9a89e90e5
--- /dev/null
+++ b/heat/tests/clients/test_designate_client.py
@@ -0,0 +1,156 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import six
+
+from designateclient import exceptions as designate_exceptions
+from designateclient import v1 as designate_client
+
+from heat.common import exception as heat_exception
+from heat.engine.clients.os import designate as client
+from heat.tests import common
+
+
+class DesignateDomainConstraintTest(common.HeatTestCase):
+
+ def test_expected_exceptions(self):
+ self.assertEqual((heat_exception.EntityNotFound,),
+ client.DesignateDomainConstraint.expected_exceptions,
+ "DesignateDomainConstraint expected exceptions error")
+
+ def test_constrain(self):
+ constrain = client.DesignateDomainConstraint()
+ client_mock = mock.MagicMock()
+ client_plugin_mock = mock.MagicMock()
+ client_plugin_mock.get_domain_id.return_value = None
+ client_mock.client_plugin.return_value = client_plugin_mock
+
+ self.assertIsNone(constrain.validate_with_client(client_mock,
+ 'domain_1'))
+
+ client_plugin_mock.get_domain_id.assert_called_once_with('domain_1')
+
+
+class DesignateClientPluginTest(common.HeatTestCase):
+ @mock.patch.object(designate_client, 'Client')
+ @mock.patch.object(client.DesignateClientPlugin, '_get_client_args')
+ def test_client(self,
+ get_client_args,
+ client_designate):
+ args = dict(
+ auth_url='auth_url',
+ project_id='project_id',
+ token=lambda: '',
+ os_endpoint='os_endpoint',
+ cacert='cacert',
+ insecure='insecure'
+ )
+ get_client_args.return_value = args
+
+ client_plugin = client.DesignateClientPlugin(
+ context=mock.MagicMock()
+ )
+ client_plugin.client()
+
+ # Make sure the right args are created
+ get_client_args.assert_called_once_with(
+ service_name='designate',
+ service_type='dns'
+ )
+
+ # Make sure the proper client is created with the expected args
+ client_designate.assert_called_once_with(
+ auth_url='auth_url',
+ project_id='project_id',
+ token='',
+ endpoint='os_endpoint',
+ cacert='cacert',
+ insecure='insecure'
+ )
+
+
+class DesignateClientPluginDomainTest(common.HeatTestCase):
+
+ sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
+ sample_name = 'test-domain.com'
+
+ def _get_mock_domain(self):
+ domain = mock.MagicMock()
+ domain.id = self.sample_uuid
+ domain.name = self.sample_name
+ return domain
+
+ def setUp(self):
+ super(DesignateClientPluginDomainTest, self).setUp()
+ self._client = mock.MagicMock()
+ self.client_plugin = client.DesignateClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ @mock.patch.object(client.DesignateClientPlugin, 'client')
+ def test_get_domain_id(self, client_designate):
+ self._client.domains.get.return_value = self._get_mock_domain()
+ client_designate.return_value = self._client
+
+ self.assertEqual(self.sample_uuid,
+ self.client_plugin.get_domain_id(self.sample_uuid))
+ self._client.domains.get.assert_called_once_with(
+ self.sample_uuid)
+
+ @mock.patch.object(client.DesignateClientPlugin, 'client')
+ def test_get_domain_id_not_found(self, client_designate):
+ self._client.domains.get.side_effect = (designate_exceptions
+ .NotFound)
+ client_designate.return_value = self._client
+
+ ex = self.assertRaises(heat_exception.EntityNotFound,
+ self.client_plugin.get_domain_id,
+ self.sample_uuid)
+ msg = ("The Designate Domain (%(name)s) could not be found." %
+ {'name': self.sample_uuid})
+ self.assertEqual(msg, six.text_type(ex))
+ self._client.domains.get.assert_called_once_with(
+ self.sample_uuid)
+
+ @mock.patch.object(client.DesignateClientPlugin, 'client')
+ def test_get_domain_id_by_name(self, client_designate):
+ self._client.domains.get.side_effect = (designate_exceptions
+ .NotFound)
+ self._client.domains.list.return_value = [self._get_mock_domain()]
+ client_designate.return_value = self._client
+
+ self.assertEqual(self.sample_uuid,
+ self.client_plugin.get_domain_id(self.sample_name))
+
+ self._client.domains.get.assert_called_once_with(
+ self.sample_name)
+ self._client.domains.list.assert_called_once_with()
+
+ @mock.patch.object(client.DesignateClientPlugin, 'client')
+ def test_get_domain_id_by_name_not_found(self, client_designate):
+ self._client.domains.get.side_effect = (designate_exceptions
+ .NotFound)
+ self._client.domains.list.return_value = []
+ client_designate.return_value = self._client
+
+ ex = self.assertRaises(heat_exception.EntityNotFound,
+ self.client_plugin.get_domain_id,
+ self.sample_name)
+ msg = ("The Designate Domain (%(name)s) could not be found." %
+ {'name': self.sample_name})
+ self.assertEqual(msg, six.text_type(ex))
+
+ self._client.domains.get.assert_called_once_with(
+ self.sample_name)
+ self._client.domains.list.assert_called_once_with()
diff --git a/heat/tests/test_glance_client.py b/heat/tests/clients/test_glance_client.py
index 27dabac07..27dabac07 100644
--- a/heat/tests/test_glance_client.py
+++ b/heat/tests/clients/test_glance_client.py
diff --git a/heat/tests/test_heatclient.py b/heat/tests/clients/test_heat_client.py
index fdea6916d..55d47ff4c 100644
--- a/heat/tests/test_heatclient.py
+++ b/heat/tests/clients/test_heat_client.py
@@ -1569,19 +1569,3 @@ class KeystoneClientTestDomainName(KeystoneClientTest):
def test_create_stack_domain_user(self):
p = super(KeystoneClientTestDomainName, self)
p.test_create_stack_domain_user()
-
-
-class HeatClientTest(KeystoneClientTest):
- """Test cases for heat.common.config"""
-
- def setUp(self):
- super(HeatClientTest, self).setUp()
-
- def test_init_auth_encryption_key_length(self):
- """Test for length of the auth_encryption_length in config file"""
- cfg.CONF.set_override('auth_encryption_key', 'abcdefghijklma')
- err = self.assertRaises(exception.Error,
- config.startup_sanity_check)
- exp_msg = ('heat.conf misconfigured, auth_encryption_key '
- 'length must be 16, 24 or 32')
- self.assertIn(exp_msg, six.text_type(err))
diff --git a/heat/tests/keystone/test_client.py b/heat/tests/clients/test_keystone_client.py
index 529d3036c..529d3036c 100644
--- a/heat/tests/keystone/test_client.py
+++ b/heat/tests/clients/test_keystone_client.py
diff --git a/heat/tests/test_magnum_client.py b/heat/tests/clients/test_magnum_client.py
index f6fd7d630..f6fd7d630 100644
--- a/heat/tests/test_magnum_client.py
+++ b/heat/tests/clients/test_magnum_client.py
diff --git a/heat/tests/test_manila_client.py b/heat/tests/clients/test_manila_client.py
index 54575541b..54575541b 100644
--- a/heat/tests/test_manila_client.py
+++ b/heat/tests/clients/test_manila_client.py
diff --git a/heat/tests/test_mistral_client.py b/heat/tests/clients/test_mistral_client.py
index 270fd2851..270fd2851 100644
--- a/heat/tests/test_mistral_client.py
+++ b/heat/tests/clients/test_mistral_client.py
diff --git a/heat/tests/neutron/test_neutron_client.py b/heat/tests/clients/test_neutron_client.py
index 7d33c2a04..7d33c2a04 100644
--- a/heat/tests/neutron/test_neutron_client.py
+++ b/heat/tests/clients/test_neutron_client.py
diff --git a/heat/tests/test_nova_client.py b/heat/tests/clients/test_nova_client.py
index 7e10cb56c..7e10cb56c 100644
--- a/heat/tests/test_nova_client.py
+++ b/heat/tests/clients/test_nova_client.py
diff --git a/heat/tests/test_sahara_client.py b/heat/tests/clients/test_sahara_client.py
index c66bddccb..c66bddccb 100644
--- a/heat/tests/test_sahara_client.py
+++ b/heat/tests/clients/test_sahara_client.py
diff --git a/heat/tests/test_swift_client.py b/heat/tests/clients/test_swift_client.py
index ba5177e33..ba5177e33 100644
--- a/heat/tests/test_swift_client.py
+++ b/heat/tests/clients/test_swift_client.py
diff --git a/heat/tests/test_zaqar_client.py b/heat/tests/clients/test_zaqar_client.py
index 88c86a253..88c86a253 100644
--- a/heat/tests/test_zaqar_client.py
+++ b/heat/tests/clients/test_zaqar_client.py
diff --git a/heat/tests/common.py b/heat/tests/common.py
index 44f522125..d4140a5e2 100644
--- a/heat/tests/common.py
+++ b/heat/tests/common.py
@@ -147,6 +147,8 @@ class HeatTestCase(testscenarios.WithScenarios,
generic_rsrc.ResourceWithCustomConstraint)
resource._register_class('ResourceWithComplexAttributesType',
generic_rsrc.ResourceWithComplexAttributes)
+ resource._register_class('ResourceWithDefaultClientName',
+ generic_rsrc.ResourceWithDefaultClientName)
def stub_wallclock(self):
"""
@@ -236,3 +238,7 @@ class HeatTestCase(testscenarios.WithScenarios,
def stub_RouterConstraint_validate(self):
validate = self.patchobject(neutron.RouterConstraint, 'validate')
validate.return_value = True
+
+ def stub_NovaNetworkConstraint(self):
+ validate = self.patchobject(nova.NetworkConstraint, 'validate')
+ validate.return_value = True
diff --git a/heat/tests/db/test_sqlalchemy_api.py b/heat/tests/db/test_sqlalchemy_api.py
index 92a48f31f..78fe6f41d 100644
--- a/heat/tests/db/test_sqlalchemy_api.py
+++ b/heat/tests/db/test_sqlalchemy_api.py
@@ -22,6 +22,7 @@ from oslo_utils import timeutils
import six
from heat.common import context
+from heat.common import crypt
from heat.common import exception
from heat.common import template_format
from heat.db.sqlalchemy import api as db_api
@@ -1500,8 +1501,8 @@ class DBAPIUserCredsTest(common.HeatTestCase):
trustor_user_id='trustor_id')
self.assertIsNotNone(user_creds.id)
self.assertEqual('test_trust_id',
- db_api._decrypt(user_creds.trust_id,
- user_creds.decrypt_method))
+ crypt.decrypt(user_creds.decrypt_method,
+ user_creds.trust_id))
self.assertEqual('trustor_id', user_creds.trustor_user_id)
self.assertIsNone(user_creds.username)
self.assertIsNone(user_creds.password)
@@ -1512,14 +1513,14 @@ class DBAPIUserCredsTest(common.HeatTestCase):
user_creds = create_user_creds(self.ctx)
self.assertIsNotNone(user_creds.id)
self.assertEqual(self.ctx.password,
- db_api._decrypt(user_creds.password,
- user_creds.decrypt_method))
+ crypt.decrypt(user_creds.decrypt_method,
+ user_creds.password))
def test_user_creds_get(self):
user_creds = create_user_creds(self.ctx)
ret_user_creds = db_api.user_creds_get(user_creds.id)
- self.assertEqual(db_api._decrypt(user_creds.password,
- user_creds.decrypt_method),
+ self.assertEqual(crypt.decrypt(user_creds.decrypt_method,
+ user_creds.password),
ret_user_creds['password'])
def test_user_creds_get_noexist(self):
@@ -2731,8 +2732,9 @@ class DBAPICryptParamsPropsTest(common.HeatTestCase):
self.ctx, cfg.CONF.auth_encryption_key)
env = session.query(models.RawTemplate).all()[0].environment
- self.assertEqual('oslo_decrypt_v1',
+ self.assertEqual('cryptography_decrypt_v1',
env['parameters']['param2'][0])
+ encrypt_value = env['parameters']['param2'][1]
db_api.db_decrypt_parameters_and_properties(
self.ctx, cfg.CONF.auth_encryption_key)
@@ -2742,9 +2744,13 @@ class DBAPICryptParamsPropsTest(common.HeatTestCase):
# Use a different encryption key to decrypt
db_api.db_encrypt_parameters_and_properties(
- self.ctx, cfg.CONF.auth_encryption_key)
+ self.ctx, '774c15be099ea74123a9b9592ff12680')
+ env = session.query(models.RawTemplate).all()[0].environment
+ self.assertNotEqual(encrypt_value,
+ env['parameters']['param2'][1])
+
db_api.db_decrypt_parameters_and_properties(
self.ctx, '774c15be099ea74123a9b9592ff12680')
env = session.query(models.RawTemplate).all()[0].environment
- self.assertNotEqual('bar', env['parameters']['param2'])
+ self.assertEqual('bar', env['parameters']['param2'])
diff --git a/heat/tests/engine/test_service_engine.py b/heat/tests/engine/test_service_engine.py
index 3854a7e9f..b4fd07d1a 100644
--- a/heat/tests/engine/test_service_engine.py
+++ b/heat/tests/engine/test_service_engine.py
@@ -39,7 +39,7 @@ class ServiceEngineTest(common.HeatTestCase):
def test_make_sure_rpc_version(self):
self.assertEqual(
- '1.11',
+ '1.12',
service.EngineService.RPC_API_VERSION,
('RPC version is changed, please update this test to new version '
'and make sure additional test cases are added for RPC APIs '
@@ -378,4 +378,4 @@ class ServiceEngineTest(common.HeatTestCase):
@mock.patch('oslo_log.log.setup')
def test_engine_service_reset(self, setup_logging_mock):
self.eng.reset()
- setup_logging_mock.assertCalledOnceWith(cfg.CONF, 'heat')
+ setup_logging_mock.assert_called_once_with(cfg.CONF, 'heat')
diff --git a/heat/tests/engine/test_software_config.py b/heat/tests/engine/test_software_config.py
index c7cfc74a7..93f98a979 100644
--- a/heat/tests/engine/test_software_config.py
+++ b/heat/tests/engine/test_software_config.py
@@ -755,5 +755,5 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
self.engine.show_software_deployment(self.ctx, deployment_id))
zaqar_client.queue.assert_called_once_with('6789')
- queue.pop.assert_called_once()
+ queue.pop.assert_called_once_with()
ssd.assert_called_once_with(self.ctx, deployment_id, 'ok', None)
diff --git a/heat/tests/engine/test_stack_create.py b/heat/tests/engine/test_stack_create.py
index 52bd3a5a5..b58ed0b03 100644
--- a/heat/tests/engine/test_stack_create.py
+++ b/heat/tests/engine/test_stack_create.py
@@ -67,7 +67,7 @@ class StackCreateTest(common.HeatTestCase):
stack_user_project_id=None,
convergence=False,
parent_resource=None)
- mock_validate.assert_called_once()
+ mock_validate.assert_called_once_with()
def test_stack_create(self):
stack_name = 'service_create_test_stack'
diff --git a/heat/tests/engine/test_stack_events.py b/heat/tests/engine/test_stack_events.py
new file mode 100644
index 000000000..bfb082dda
--- /dev/null
+++ b/heat/tests/engine/test_stack_events.py
@@ -0,0 +1,238 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from heat.engine import resource as res
+from heat.engine.resources.aws.ec2 import instance as instances
+from heat.engine import service
+from heat.engine import stack as parser
+from heat.objects import event as event_object
+from heat.objects import stack as stack_object
+from heat.tests import common
+from heat.tests.engine import tools
+from heat.tests import generic_resource as generic_rsrc
+from heat.tests import utils
+
+
+class StackEventTest(common.HeatTestCase):
+
+ def setUp(self):
+ super(StackEventTest, self).setUp()
+
+ self.ctx = utils.dummy_context(tenant_id='stack_event_test_tenant')
+ self.eng = service.EngineService('a-host', 'a-topic')
+ self.eng.create_periodic_tasks()
+
+ @tools.stack_context('service_event_list_test_stack')
+ @mock.patch.object(service.EngineService, '_get_stack')
+ def test_event_list(self, mock_get):
+ mock_get.return_value = stack_object.Stack.get_by_id(self.ctx,
+ self.stack.id)
+ events = self.eng.list_events(self.ctx, self.stack.identifier())
+
+ self.assertEqual(4, len(events))
+ for ev in events:
+ self.assertIn('event_identity', ev)
+ self.assertIsInstance(ev['event_identity'], dict)
+ self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
+
+ self.assertIn('resource_name', ev)
+ self.assertIn(ev['resource_name'],
+ ('service_event_list_test_stack', 'WebServer'))
+
+ self.assertIn('physical_resource_id', ev)
+
+ self.assertIn('resource_properties', ev)
+ # Big long user data field... it mentions 'wordpress'
+ # a few times so this should work.
+ if ev.get('resource_properties'):
+ user_data = ev['resource_properties']['UserData']
+ self.assertIn('wordpress', user_data)
+ self.assertEqual('F17-x86_64-gold',
+ ev['resource_properties']['ImageId'])
+ self.assertEqual('m1.large',
+ ev['resource_properties']['InstanceType'])
+
+ self.assertEqual('CREATE', ev['resource_action'])
+ self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
+
+ self.assertIn('resource_status_reason', ev)
+ self.assertIn(ev['resource_status_reason'],
+ ('state changed',
+ 'Stack CREATE started',
+ 'Stack CREATE completed successfully'))
+
+ self.assertIn('resource_type', ev)
+ self.assertIn(ev['resource_type'],
+ ('AWS::EC2::Instance', 'OS::Heat::Stack'))
+
+ self.assertIn('stack_identity', ev)
+
+ self.assertIn('stack_name', ev)
+ self.assertEqual(self.stack.name, ev['stack_name'])
+
+ self.assertIn('event_time', ev)
+
+ mock_get.assert_called_once_with(self.ctx, self.stack.identifier(),
+ show_deleted=True)
+
+ @tools.stack_context('service_event_list_deleted_resource')
+ @mock.patch.object(instances.Instance, 'handle_delete')
+ def test_event_list_deleted_resource(self, mock_delete):
+ mock_delete.return_value = None
+
+ res._register_class('GenericResourceType',
+ generic_rsrc.GenericResource)
+
+ thread = mock.Mock()
+ thread.link = mock.Mock(return_value=None)
+
+ def run(stack_id, func, *args, **kwargs):
+ func(*args)
+ return thread
+ self.eng.thread_group_mgr.start = run
+
+ new_tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
+ 'Resources': {'AResource': {'Type':
+ 'GenericResourceType'}}}
+
+ result = self.eng.update_stack(self.ctx, self.stack.identifier(),
+ new_tmpl, None, None, {})
+
+ # The self.stack reference needs to be updated. Since the underlying
+ # stack is updated in update_stack, the original reference is now
+ # pointing to an orphaned stack object.
+ self.stack = parser.Stack.load(self.ctx, stack_id=result['stack_id'])
+
+ self.assertEqual(result, self.stack.identifier())
+ self.assertIsInstance(result, dict)
+ self.assertTrue(result['stack_id'])
+ events = self.eng.list_events(self.ctx, self.stack.identifier())
+
+ self.assertEqual(10, len(events))
+
+ for ev in events:
+ self.assertIn('event_identity', ev)
+ self.assertIsInstance(ev['event_identity'], dict)
+ self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
+
+ self.assertIn('resource_name', ev)
+ self.assertIn('physical_resource_id', ev)
+ self.assertIn('resource_properties', ev)
+ self.assertIn('resource_status_reason', ev)
+
+ self.assertIn(ev['resource_action'],
+ ('CREATE', 'UPDATE', 'DELETE'))
+ self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
+
+ self.assertIn('resource_type', ev)
+ self.assertIn(ev['resource_type'], ('AWS::EC2::Instance',
+ 'GenericResourceType',
+ 'OS::Heat::Stack'))
+
+ self.assertIn('stack_identity', ev)
+
+ self.assertIn('stack_name', ev)
+ self.assertEqual(self.stack.name, ev['stack_name'])
+
+ self.assertIn('event_time', ev)
+
+ mock_delete.assert_called_once_with()
+ expected = [
+ mock.call(mock.ANY),
+ mock.call(mock.ANY, self.stack.id, mock.ANY)
+ ]
+ self.assertEqual(expected, thread.link.call_args_list)
+
+ @tools.stack_context('service_event_list_by_tenant')
+ def test_event_list_by_tenant(self):
+ events = self.eng.list_events(self.ctx, None)
+
+ self.assertEqual(4, len(events))
+ for ev in events:
+ self.assertIn('event_identity', ev)
+ self.assertIsInstance(ev['event_identity'], dict)
+ self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
+
+ self.assertIn('resource_name', ev)
+ self.assertIn(ev['resource_name'],
+ ('WebServer', 'service_event_list_by_tenant'))
+
+ self.assertIn('physical_resource_id', ev)
+
+ self.assertIn('resource_properties', ev)
+ # Big long user data field... it mentions 'wordpress'
+ # a few times so this should work.
+ if ev.get('resource_properties'):
+ user_data = ev['resource_properties']['UserData']
+ self.assertIn('wordpress', user_data)
+ self.assertEqual('F17-x86_64-gold',
+ ev['resource_properties']['ImageId'])
+ self.assertEqual('m1.large',
+ ev['resource_properties']['InstanceType'])
+
+ self.assertEqual('CREATE', ev['resource_action'])
+ self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
+
+ self.assertIn('resource_status_reason', ev)
+ self.assertIn(ev['resource_status_reason'],
+ ('state changed',
+ 'Stack CREATE started',
+ 'Stack CREATE completed successfully'))
+
+ self.assertIn('resource_type', ev)
+ self.assertIn(ev['resource_type'],
+ ('AWS::EC2::Instance', 'OS::Heat::Stack'))
+
+ self.assertIn('stack_identity', ev)
+
+ self.assertIn('stack_name', ev)
+ self.assertEqual(self.stack.name, ev['stack_name'])
+
+ self.assertIn('event_time', ev)
+
+ @mock.patch.object(event_object.Event, 'get_all_by_stack')
+ @mock.patch.object(service.EngineService, '_get_stack')
+ def test_event_list_with_marker_and_filters(self, mock_get, mock_get_all):
+ limit = object()
+ marker = object()
+ sort_keys = object()
+ sort_dir = object()
+ filters = object()
+ mock_get.return_value = mock.Mock(id=1)
+ self.eng.list_events(self.ctx, 1, limit=limit, marker=marker,
+ sort_keys=sort_keys, sort_dir=sort_dir,
+ filters=filters)
+
+ mock_get_all.assert_called_once_with(self.ctx, 1, limit=limit,
+ sort_keys=sort_keys,
+ marker=marker, sort_dir=sort_dir,
+ filters=filters)
+
+ @mock.patch.object(event_object.Event, 'get_all_by_tenant')
+ def test_tenant_events_list_with_marker_and_filters(self, mock_get_all):
+ limit = object()
+ marker = object()
+ sort_keys = object()
+ sort_dir = object()
+ filters = object()
+
+ self.eng.list_events(self.ctx, None, limit=limit, marker=marker,
+ sort_keys=sort_keys, sort_dir=sort_dir,
+ filters=filters)
+ mock_get_all.assert_called_once_with(self.ctx, limit=limit,
+ sort_keys=sort_keys,
+ marker=marker,
+ sort_dir=sort_dir,
+ filters=filters)
diff --git a/heat/tests/generic_resource.py b/heat/tests/generic_resource.py
index c1fc6969b..923dc31de 100644
--- a/heat/tests/generic_resource.py
+++ b/heat/tests/generic_resource.py
@@ -33,6 +33,10 @@ class GenericResource(resource.Resource):
attributes_schema = {'foo': attributes.Schema('A generic attribute'),
'Foo': attributes.Schema('Another generic attribute')}
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
def handle_create(self):
LOG.warn(_LW('Creating generic resource (Type "%s")'),
self.type())
@@ -145,7 +149,7 @@ class SignalResource(signal_responder.SignalResponder):
def _resolve_attribute(self, name):
if name == 'AlarmUrl' and self.resource_id is not None:
- return six.text_type(self._get_signed_url())
+ return six.text_type(self._get_ec2_signed_url())
class StackUserResource(stack_user.StackUser):
@@ -177,3 +181,7 @@ class ResourceWithAttributeType(GenericResource):
return "valid_sting"
elif name == 'attr2':
return "invalid_type"
+
+
+class ResourceWithDefaultClientName(resource.Resource):
+ default_client_name = 'sample'
diff --git a/heat/tests/manila/__init__.py b/heat/tests/manila/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/manila/__init__.py
diff --git a/heat/tests/test_manila_security_service.py b/heat/tests/manila/test_manila_security_service.py
index ae71af8c8..ae71af8c8 100644
--- a/heat/tests/test_manila_security_service.py
+++ b/heat/tests/manila/test_manila_security_service.py
diff --git a/heat/tests/test_manila_share.py b/heat/tests/manila/test_manila_share.py
index 2a59242eb..e175e453e 100644
--- a/heat/tests/test_manila_share.py
+++ b/heat/tests/manila/test_manila_share.py
@@ -18,6 +18,7 @@ import six
from heat.common import exception
from heat.common import template_format
+from heat.engine import resource
from heat.engine.resources.openstack.manila import share as mshare
from heat.engine import rsrc_defn
from heat.engine import scheduler
@@ -113,18 +114,18 @@ class ManilaShareTest(common.HeatTestCase):
def test_share_create_fail(self):
share = self._init_share("stack_share_create_fail")
- share.client().shares.create.return_value = self.fake_share
share.client().shares.get.return_value = self.failed_share
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(share.create))
+ exc = self.assertRaises(resource.ResourceInError,
+ share.check_create_complete,
+ self.failed_share)
self.assertIn("Error during creation", six.text_type(exc))
def test_share_create_unknown_status(self):
share = self._init_share("stack_share_create_unknown")
- share.client().shares.create.return_value = self.fake_share
share.client().shares.get.return_value = self.deleting_share
- exc = self.assertRaises(exception.ResourceFailure,
- scheduler.TaskRunner(share.create))
+ exc = self.assertRaises(resource.ResourceUnknownStatus,
+ share.check_create_complete,
+ self.deleting_share)
self.assertIn("Unknown status", six.text_type(exc))
def test_share_delete(self):
@@ -171,10 +172,9 @@ class ManilaShareTest(common.HeatTestCase):
kwargs = {
"display_name": "name",
"display_description": "desc",
- "is_public": True
}
- share.client().shares.update.assertCalledOnceWith(share.resource_id,
- **kwargs)
+ share.client().shares.update.assert_called_once_with(
+ share.resource_id, **kwargs)
def test_share_update_access_rules(self):
share = self._create_share("stack_share_update_access_rules")
diff --git a/heat/tests/test_manila_share_type.py b/heat/tests/manila/test_manila_share_type.py
index f0198d26f..f0198d26f 100644
--- a/heat/tests/test_manila_share_type.py
+++ b/heat/tests/manila/test_manila_share_type.py
diff --git a/heat/tests/test_share_network.py b/heat/tests/manila/test_share_network.py
index 9678d1e16..445ec53be 100644
--- a/heat/tests/test_share_network.py
+++ b/heat/tests/manila/test_share_network.py
@@ -17,7 +17,7 @@ import mock
from heat.common import exception
from heat.common import template_format
-from heat.engine import properties
+from heat.engine.clients.os import nova
from heat.engine.resources.openstack.manila import share_network
from heat.engine import scheduler
from heat.tests import common
@@ -65,8 +65,9 @@ class ManilaShareNetworkTest(common.HeatTestCase):
self.client_plugin = mock.Mock()
self.patchobject(share_network.ManilaShareNetwork, 'client_plugin',
return_value=self.client_plugin)
- self.patchobject(properties.Properties, 'validate',
- return_value=mock.Mock)
+ self.stub_NetworkConstraint_validate()
+ self.stub_NovaNetworkConstraint()
+ self.stub_SubnetConstraint_validate()
def _create_network(self, name, snippet, stack):
net = share_network.ManilaShareNetwork(name, snippet, stack)
@@ -189,6 +190,17 @@ class ManilaShareNetworkTest(common.HeatTestCase):
self.assertRaisesRegexp(exception.ResourcePropertyConflict, msg,
net.validate)
+ def test_nova_constraint_fail(self):
+ validate = self.patchobject(nova.NetworkConstraint, 'validate')
+ validate.return_value = False
+ t = template_format.parse(stack_template)
+ t['resources']['share_network']['properties']['nova_network'] = 1
+ stack = utils.parse_stack(t)
+ rsrc_defn = stack.t.resource_definitions(stack)['share_network']
+ self.assertRaises(exception.ResourceFailure,
+ self._create_network, 'share_network',
+ rsrc_defn, stack)
+
def test_attributes(self):
net = self._create_network('share_network', self.rsrc_defn,
self.stack)
diff --git a/heat/tests/mistral/__init__.py b/heat/tests/mistral/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/mistral/__init__.py
diff --git a/heat/tests/test_mistral_cron_trigger.py b/heat/tests/mistral/test_mistral_cron_trigger.py
index c6aadf47f..c0d7613a3 100644
--- a/heat/tests/test_mistral_cron_trigger.py
+++ b/heat/tests/mistral/test_mistral_cron_trigger.py
@@ -50,6 +50,12 @@ class FakeCronTrigger(object):
self.remaining_executions = 3
+class MistralCronTriggerTestResource(cron_trigger.CronTrigger):
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
+
class MistralCronTriggerTest(common.HeatTestCase):
def setUp(self):
@@ -64,11 +70,11 @@ class MistralCronTriggerTest(common.HeatTestCase):
self.rsrc_defn = resource_defns['cron_trigger']
self.client = mock.Mock()
- self.patchobject(cron_trigger.CronTrigger, 'client',
+ self.patchobject(MistralCronTriggerTestResource, 'client',
return_value=self.client)
def _create_resource(self, name, snippet, stack):
- ct = cron_trigger.CronTrigger(name, snippet, stack)
+ ct = MistralCronTriggerTestResource(name, snippet, stack)
self.client.cron_triggers.create.return_value = FakeCronTrigger(
'my_cron_trigger')
self.client.cron_triggers.get.return_value = FakeCronTrigger(
diff --git a/heat/tests/test_mistral_workflow.py b/heat/tests/mistral/test_mistral_workflow.py
index 485be5c5e..402434289 100644
--- a/heat/tests/test_mistral_workflow.py
+++ b/heat/tests/mistral/test_mistral_workflow.py
@@ -179,6 +179,12 @@ class FakeWorkflow(object):
self.name = name
+class MistralWorkFlowTestResource(workflow.Workflow):
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
+
class TestMistralWorkflow(common.HeatTestCase):
def setUp(self):
@@ -193,7 +199,7 @@ class TestMistralWorkflow(common.HeatTestCase):
self.rsrc_defn = resource_defns['workflow']
self.mistral = mock.Mock()
- self.patchobject(workflow.Workflow, 'mistral',
+ self.patchobject(MistralWorkFlowTestResource, 'mistral',
return_value=self.mistral)
self.patches = []
@@ -216,7 +222,7 @@ class TestMistralWorkflow(common.HeatTestCase):
patch.stop()
def _create_resource(self, name, snippet, stack):
- wf = workflow.Workflow(name, snippet, stack)
+ wf = MistralWorkFlowTestResource(name, snippet, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.create)()
@@ -234,7 +240,7 @@ class TestMistralWorkflow(common.HeatTestCase):
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -269,7 +275,7 @@ class TestMistralWorkflow(common.HeatTestCase):
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
- wf = workflow.Workflow('workflow', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
exc = self.assertRaises(exception.StackValidationFailed,
wf.validate)
@@ -281,7 +287,7 @@ class TestMistralWorkflow(common.HeatTestCase):
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
- wf = workflow.Workflow('workflow', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.side_effect = Exception('boom!')
@@ -318,7 +324,7 @@ class TestMistralWorkflow(common.HeatTestCase):
self.mistral.workflows.update.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.update, new_wf)()
- self.mistral.workflows.update.assert_called_once()
+ self.assertTrue(self.mistral.workflows.update.called)
self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
def test_update_failed(self):
@@ -379,7 +385,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -394,7 +400,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -417,7 +423,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -434,7 +440,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -456,7 +462,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
- wf = workflow.Workflow('create_vm', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
@@ -472,7 +478,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_with_params)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
- wf = workflow.Workflow('workflow', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('workflow')]
scheduler.TaskRunner(wf.create)()
@@ -487,7 +493,7 @@ class TestMistralWorkflow(common.HeatTestCase):
tmpl = template_format.parse(workflow_template_with_params_override)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
- wf = workflow.Workflow('workflow', rsrc_defns, stack)
+ wf = MistralWorkFlowTestResource('workflow', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('workflow')]
scheduler.TaskRunner(wf.create)()
diff --git a/heat/tests/neutron/test_neutron.py b/heat/tests/neutron/test_neutron.py
index 1699af95d..e5e0f95e8 100644
--- a/heat/tests/neutron/test_neutron.py
+++ b/heat/tests/neutron/test_neutron.py
@@ -107,6 +107,10 @@ class NeutronTest(common.HeatTestCase):
class SomeNeutronResource(nr.NeutronResource):
properties_schema = {}
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
tmpl = rsrc_defn.ResourceDefinition('test_res', 'Foo')
stack = mock.MagicMock()
stack.has_cache_data = mock.Mock(return_value=False)
diff --git a/heat/tests/test_nova_floatingip.py b/heat/tests/nova/test_nova_floatingip.py
index 39eb340e7..39eb340e7 100644
--- a/heat/tests/test_nova_floatingip.py
+++ b/heat/tests/nova/test_nova_floatingip.py
diff --git a/heat/tests/test_nova_keypair.py b/heat/tests/nova/test_nova_keypair.py
index e625ad34a..e625ad34a 100644
--- a/heat/tests/test_nova_keypair.py
+++ b/heat/tests/nova/test_nova_keypair.py
diff --git a/heat/tests/test_nova_servergroup.py b/heat/tests/nova/test_nova_servergroup.py
index def6833b6..def6833b6 100644
--- a/heat/tests/test_nova_servergroup.py
+++ b/heat/tests/nova/test_nova_servergroup.py
diff --git a/heat/tests/test_server.py b/heat/tests/nova/test_server.py
index 8525c46b0..ecef7007d 100644
--- a/heat/tests/test_server.py
+++ b/heat/tests/nova/test_server.py
@@ -898,7 +898,7 @@ class ServersTest(common.HeatTestCase):
server._delete_queue()
zc.queue.assert_called_once_with(queue_id)
- zc.queue.delete.assert_called_once()
+ zc.queue(queue_id).delete.assert_called_once_with()
self.m.VerifyAll()
@@ -1605,9 +1605,9 @@ class ServersTest(common.HeatTestCase):
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
- def activate_status(server):
- server.status = 'ACTIVE'
- return_server.get = activate_status.__get__(return_server)
+ def fail_status(server):
+ server.status = 'ERROR'
+ return_server.get = fail_status.__get__(return_server)
self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
self.fc.client.post_servers_1234_action(
@@ -1618,10 +1618,57 @@ class ServersTest(common.HeatTestCase):
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: resources.srv_update2: Resizing to 'm1.small' failed, "
- "status 'ACTIVE'", six.text_type(error))
+ "status 'ERROR'", six.text_type(error))
self.assertEqual((server.UPDATE, server.FAILED), server.state)
self.m.VerifyAll()
+ def test_server_update_flavor_resize_has_not_started(self):
+ """Test update of server flavour if server resize has not started.
+
+ Server resize is asynchronous operation in nova. So when heat is
+ requesting resize and polling the server then the server may still be
+ in ACTIVE state. So we need to wait some amount of time till the server
+ status becomes RESIZE.
+ """
+ # create the server for resizing
+ server = self.fc.servers.list()[1]
+ server.id = '1234'
+ server_resource = self._create_test_server(server,
+ 'resize_server')
+ # prepare an update template with the resized flavor
+ update_template = copy.deepcopy(server_resource.t)
+ update_template['Properties']['flavor'] = 'm1.small'
+
+ self.m.StubOutWithMock(self.fc.servers, 'get')
+ self.fc.servers.get('1234').AndReturn(server)
+
+ # define the status transitions during the server resize:
+ # ACTIVE(initial) -> ACTIVE -> RESIZE -> VERIFY_RESIZE
+
+ def active_status(srv):
+ srv.status = 'ACTIVE'
+ server.get = active_status.__get__(server)
+
+ def resize_status(srv):
+ srv.status = 'RESIZE'
+ server.get = resize_status.__get__(server)
+
+ def verify_resize_status(srv):
+ srv.status = 'VERIFY_RESIZE'
+ server.get = verify_resize_status.__get__(server)
+
+ self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
+ self.fc.client.post_servers_1234_action(
+ body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
+ self.fc.client.post_servers_1234_action(
+ body={'confirmResize': None}).AndReturn((202, None))
+ self.m.ReplayAll()
+ # check that server resize has finished correctly
+ scheduler.TaskRunner(server_resource.update, update_template)()
+ self.assertEqual((server_resource.UPDATE, server_resource.COMPLETE),
+ server_resource.state)
+ self.m.VerifyAll()
+
def test_server_update_server_flavor_replace(self):
stack_name = 'update_flvrep'
(tmpl, stack) = self._setup_test_stack(stack_name)
@@ -1858,16 +1905,13 @@ class ServersTest(common.HeatTestCase):
self.m.VerifyAll()
- def test_server_status_suspend(self):
+ def _test_server_status_suspend(self, name, state=('CREATE', 'COMPLETE')):
return_server = self.fc.servers.list()[1]
- server = self._create_test_server(return_server,
- 'srv_susp_w')
+ server = self._create_test_server(return_server, name)
server.resource_id = '1234'
- self.m.ReplayAll()
+ server.state_set(state[0], state[1])
- # Override the get_servers_1234 handler status to SUSPENDED, but
- # return the ACTIVE state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'ACTIVE'
@@ -1884,6 +1928,19 @@ class ServersTest(common.HeatTestCase):
self.m.VerifyAll()
+ def test_server_suspend_in_create_complete(self):
+ self._test_server_status_suspend('test_suspend_in_create_complete')
+
+ def test_server_suspend_in_suspend_failed(self):
+ self._test_server_status_suspend(
+ name='test_suspend_in_suspend_failed',
+ state=('SUSPEND', 'FAILED'))
+
+ def test_server_suspend_in_suspend_complete(self):
+ self._test_server_status_suspend(
+ name='test_suspend_in_suspend_complete',
+ state=('SUSPEND', 'COMPLETE'))
+
def test_server_status_suspend_unknown_status(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
@@ -1915,16 +1972,13 @@ class ServersTest(common.HeatTestCase):
self.m.VerifyAll()
- def test_server_status_resume(self):
+ def _test_server_status_resume(self, name, state=('SUSPEND', 'COMPLETE')):
return_server = self.fc.servers.list()[1]
- server = self._create_test_server(return_server,
- 'srv_res_w')
+ server = self._create_test_server(return_server, name)
server.resource_id = '1234'
- self.m.ReplayAll()
+ server.state_set(state[0], state[1])
- # Override the get_servers_1234 handler status to ACTIVE, but
- # return the SUSPENDED state first (twice, so we sleep)
d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
d2 = copy.deepcopy(d1)
d1['server']['status'] = 'SUSPENDED'
@@ -1936,13 +1990,25 @@ class ServersTest(common.HeatTestCase):
get().AndReturn((200, d2))
self.m.ReplayAll()
- server.state_set(server.SUSPEND, server.COMPLETE)
-
scheduler.TaskRunner(server.resume)()
self.assertEqual((server.RESUME, server.COMPLETE), server.state)
self.m.VerifyAll()
+ def test_server_resume_in_suspend_complete(self):
+ self._test_server_status_resume(
+ name='test_resume_in_suspend_complete')
+
+ def test_server_resume_in_resume_failed(self):
+ self._test_server_status_resume(
+ name='test_resume_in_resume_failed',
+ state=('RESUME', 'FAILED'))
+
+ def test_server_resume_in_resume_complete(self):
+ self._test_server_status_resume(
+ name='test_resume_in_resume_complete',
+ state=('RESUME', 'COMPLETE'))
+
def test_server_status_resume_no_resource_id(self):
return_server = self.fc.servers.list()[1]
server = self._create_test_server(return_server,
diff --git a/heat/tests/openstack/designate/__init__.py b/heat/tests/openstack/designate/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/openstack/designate/__init__.py
diff --git a/heat/tests/test_attributes.py b/heat/tests/test_attributes.py
index d57d8094f..2c7c7fb86 100644
--- a/heat/tests/test_attributes.py
+++ b/heat/tests/test_attributes.py
@@ -199,6 +199,18 @@ class AttributesTypeTest(common.HeatTestCase):
('integer_type',
dict(a_type=attributes.Schema.INTEGER,
value=1,
+ invalid_value='invalid_value')),
+ ('boolean_type',
+ dict(a_type=attributes.Schema.BOOLEAN,
+ value=True,
+ invalid_value='invalid_value')),
+ ('boolean_type_string_true',
+ dict(a_type=attributes.Schema.BOOLEAN,
+ value="True",
+ invalid_value='invalid_value')),
+ ('boolean_type_string_false',
+ dict(a_type=attributes.Schema.BOOLEAN,
+ value="false",
invalid_value='invalid_value'))
]
diff --git a/heat/tests/test_crypt.py b/heat/tests/test_crypt.py
new file mode 100644
index 000000000..165c6a853
--- /dev/null
+++ b/heat/tests/test_crypt.py
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import six
+
+from heat.common import config
+from heat.common import crypt
+from heat.common import exception
+from heat.tests import common
+
+
+class CryptTests(common.HeatTestCase):
+
+ def test_fernet_key(self):
+ key = 'x' * 16
+ method, result = crypt.encrypt('foo', key)
+ self.assertEqual('cryptography_decrypt_v1', method)
+ self.assertIsNotNone(result)
+
+ def test_init_auth_encryption_key_length(self):
+ """Test for length of the auth_encryption_length in config file"""
+ cfg.CONF.set_override('auth_encryption_key', 'abcdefghijklma')
+ err = self.assertRaises(exception.Error,
+ config.startup_sanity_check)
+ exp_msg = ('heat.conf misconfigured, auth_encryption_key '
+ 'must be 32 characters')
+ self.assertIn(exp_msg, six.text_type(err))
diff --git a/heat/tests/test_engine_service.py b/heat/tests/test_engine_service.py
index 7f68ef73d..acb559585 100644
--- a/heat/tests/test_engine_service.py
+++ b/heat/tests/test_engine_service.py
@@ -32,14 +32,13 @@ from heat.engine import dependencies
from heat.engine import environment
from heat.engine.hot import template as hottemplate
from heat.engine import resource as res
-from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import service
from heat.engine import service_stack_watch
from heat.engine import stack as parser
from heat.engine import stack_lock
from heat.engine import template as templatem
from heat.engine import watchrule
-from heat.objects import event as event_object
+from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import stack as stack_object
from heat.objects import sync_point as sync_point_object
@@ -502,18 +501,25 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
stack.mark_complete(stack.current_traversal)
self.assertTrue(stack.purge_db.called)
- def test_purge_db_deletes_previous_template(self, mock_cr):
+ @mock.patch.object(raw_template_object.RawTemplate, 'delete')
+ def test_purge_db_deletes_previous_template(self, mock_tmpl_delete,
+ mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
- prev_tmpl = templatem.Template.create_empty_template()
- prev_tmpl.store()
- stack.prev_raw_template_id = prev_tmpl.id
- stack.store()
+ stack.prev_raw_template_id = 10
+ stack.purge_db()
+ self.assertTrue(mock_tmpl_delete.called)
+
+ @mock.patch.object(raw_template_object.RawTemplate, 'delete')
+ def test_purge_db_does_not_delete_previous_template_when_stack_fails(
+ self, mock_tmpl_delete, mock_cr):
+ stack = tools.get_stack('test_stack', utils.dummy_context(),
+ template=tools.string_template_five,
+ convergence=True)
+ stack.status = stack.FAILED
stack.purge_db()
- self.assertRaises(exception.NotFound,
- templatem.Template.load,
- stack.context, prev_tmpl.id)
+ self.assertFalse(mock_tmpl_delete.called)
def test_purge_db_deletes_sync_points(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
@@ -525,17 +531,16 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
stack.context, stack.id, stack.current_traversal)
self.assertEqual(0, rows)
- def test_purge_db_deletes_stack_for_deleted_stack(self, mock_cr):
+ @mock.patch.object(stack_object.Stack, 'delete')
+ def test_purge_db_deletes_stack_for_deleted_stack(self, mock_stack_delete,
+ mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
template=tools.string_template_five,
convergence=True)
stack.store()
stack.state_set(stack.DELETE, stack.COMPLETE, 'test reason')
stack.purge_db()
- self.assertRaises(exception.NotFound,
- parser.Stack.load,
- stack.context, stack_id=stack.id,
- show_deleted=False)
+ self.assertTrue(mock_stack_delete.called)
def test_get_best_existing_db_resource(self, mock_cr):
stack = tools.get_stack('test_stack', utils.dummy_context(),
@@ -1517,218 +1522,6 @@ class StackServiceTest(common.HeatTestCase):
ctx2,
self.stack.name))
- @tools.stack_context('service_event_list_test_stack')
- def test_stack_event_list(self):
- self.m.StubOutWithMock(service.EngineService, '_get_stack')
- s = stack_object.Stack.get_by_id(self.ctx, self.stack.id)
- service.EngineService._get_stack(self.ctx,
- self.stack.identifier(),
- show_deleted=True).AndReturn(s)
- self.m.ReplayAll()
-
- events = self.eng.list_events(self.ctx, self.stack.identifier())
-
- self.assertEqual(4, len(events))
- for ev in events:
- self.assertIn('event_identity', ev)
- self.assertIsInstance(ev['event_identity'], dict)
- self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
-
- self.assertIn('resource_name', ev)
- self.assertIn(ev['resource_name'],
- ('service_event_list_test_stack', 'WebServer'))
-
- self.assertIn('physical_resource_id', ev)
-
- self.assertIn('resource_properties', ev)
- # Big long user data field.. it mentions 'wordpress'
- # a few times so this should work.
- if ev.get('resource_properties'):
- user_data = ev['resource_properties']['UserData']
- self.assertIn('wordpress', user_data)
- self.assertEqual('F17-x86_64-gold',
- ev['resource_properties']['ImageId'])
- self.assertEqual('m1.large',
- ev['resource_properties']['InstanceType'])
-
- self.assertEqual('CREATE', ev['resource_action'])
- self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
-
- self.assertIn('resource_status_reason', ev)
- self.assertIn(ev['resource_status_reason'],
- ('state changed',
- 'Stack CREATE started',
- 'Stack CREATE completed successfully'))
-
- self.assertIn('resource_type', ev)
- self.assertIn(ev['resource_type'],
- ('AWS::EC2::Instance', 'OS::Heat::Stack'))
-
- self.assertIn('stack_identity', ev)
-
- self.assertIn('stack_name', ev)
- self.assertEqual(self.stack.name, ev['stack_name'])
-
- self.assertIn('event_time', ev)
-
- self.m.VerifyAll()
-
- @tools.stack_context('event_list_deleted_stack')
- def test_stack_event_list_deleted_resource(self):
-
- thread = self.m.CreateMockAnything()
- thread.link(mox.IgnoreArg()).AndReturn(None)
- thread.link(mox.IgnoreArg(), self.stack.id,
- mox.IgnoreArg()).AndReturn(None)
-
- def run(stack_id, func, *args, **kwargs):
- func(*args)
- return thread
- self.eng.thread_group_mgr.start = run
-
- new_tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
- 'Resources': {'AResource': {'Type':
- 'GenericResourceType'}}}
-
- self.m.StubOutWithMock(instances.Instance, 'handle_delete')
- instances.Instance.handle_delete()
-
- self.m.ReplayAll()
-
- result = self.eng.update_stack(self.ctx, self.stack.identifier(),
- new_tmpl, None, None, {})
-
- # The self.stack reference needs to be updated. Since the underlying
- # stack is updated in update_stack, the original reference is now
- # pointing to an orphaned stack object.
- self.stack = parser.Stack.load(self.ctx, stack_id=result['stack_id'])
-
- self.assertEqual(result, self.stack.identifier())
- self.assertIsInstance(result, dict)
- self.assertTrue(result['stack_id'])
- events = self.eng.list_events(self.ctx, self.stack.identifier())
-
- self.assertEqual(10, len(events))
-
- for ev in events:
- self.assertIn('event_identity', ev)
- self.assertIsInstance(ev['event_identity'], dict)
- self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
-
- self.assertIn('resource_name', ev)
- self.assertIn('physical_resource_id', ev)
- self.assertIn('resource_properties', ev)
- self.assertIn('resource_status_reason', ev)
-
- self.assertIn(ev['resource_action'],
- ('CREATE', 'UPDATE', 'DELETE'))
- self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
-
- self.assertIn('resource_type', ev)
- self.assertIn(ev['resource_type'], ('AWS::EC2::Instance',
- 'GenericResourceType',
- 'OS::Heat::Stack'))
-
- self.assertIn('stack_identity', ev)
-
- self.assertIn('stack_name', ev)
- self.assertEqual(self.stack.name, ev['stack_name'])
-
- self.assertIn('event_time', ev)
-
- self.m.VerifyAll()
-
- @tools.stack_context('service_event_list_test_stack')
- def test_stack_event_list_by_tenant(self):
- events = self.eng.list_events(self.ctx, None)
-
- self.assertEqual(4, len(events))
- for ev in events:
- self.assertIn('event_identity', ev)
- self.assertIsInstance(ev['event_identity'], dict)
- self.assertTrue(ev['event_identity']['path'].rsplit('/', 1)[1])
-
- self.assertIn('resource_name', ev)
- self.assertIn(ev['resource_name'],
- ('WebServer', 'service_event_list_test_stack'))
-
- self.assertIn('physical_resource_id', ev)
-
- self.assertIn('resource_properties', ev)
- # Big long user data field.. it mentions 'wordpress'
- # a few times so this should work.
- if ev.get('resource_properties'):
- user_data = ev['resource_properties']['UserData']
- self.assertIn('wordpress', user_data)
- self.assertEqual('F17-x86_64-gold',
- ev['resource_properties']['ImageId'])
- self.assertEqual('m1.large',
- ev['resource_properties']['InstanceType'])
-
- self.assertEqual('CREATE', ev['resource_action'])
- self.assertIn(ev['resource_status'], ('IN_PROGRESS', 'COMPLETE'))
-
- self.assertIn('resource_status_reason', ev)
- self.assertIn(ev['resource_status_reason'],
- ('state changed',
- 'Stack CREATE started',
- 'Stack CREATE completed successfully'))
-
- self.assertIn('resource_type', ev)
- self.assertIn(ev['resource_type'],
- ('AWS::EC2::Instance', 'OS::Heat::Stack'))
-
- self.assertIn('stack_identity', ev)
-
- self.assertIn('stack_name', ev)
- self.assertEqual(self.stack.name, ev['stack_name'])
-
- self.assertIn('event_time', ev)
-
- self.m.VerifyAll()
-
- @mock.patch.object(event_object.Event, 'get_all_by_stack')
- @mock.patch.object(service.EngineService, '_get_stack')
- def test_stack_events_list_passes_marker_and_filters(self,
- mock_get_stack,
- mock_events_get_all):
- limit = object()
- marker = object()
- sort_keys = object()
- sort_dir = object()
- filters = object()
- s = mock.Mock(id=1)
- mock_get_stack.return_value = s
- self.eng.list_events(self.ctx, 1, limit=limit,
- marker=marker, sort_keys=sort_keys,
- sort_dir=sort_dir, filters=filters)
- mock_events_get_all.assert_called_once_with(self.ctx,
- 1,
- limit=limit,
- sort_keys=sort_keys,
- marker=marker,
- sort_dir=sort_dir,
- filters=filters)
-
- @mock.patch.object(event_object.Event, 'get_all_by_tenant')
- def test_tenant_events_list_passes_marker_and_filters(
- self, mock_tenant_events_get_all):
- limit = object()
- marker = object()
- sort_keys = object()
- sort_dir = object()
- filters = object()
-
- self.eng.list_events(self.ctx, None, limit=limit,
- marker=marker, sort_keys=sort_keys,
- sort_dir=sort_dir, filters=filters)
- mock_tenant_events_get_all.assert_called_once_with(self.ctx,
- limit=limit,
- sort_keys=sort_keys,
- marker=marker,
- sort_dir=sort_dir,
- filters=filters)
-
@tools.stack_context('service_list_all_test_stack')
def test_stack_list_all(self):
self.m.StubOutWithMock(parser.Stack, '_from_db')
@@ -2171,13 +1964,18 @@ class StackServiceTest(common.HeatTestCase):
self.assertIn('WordPress', s['description'])
self.assertIn('parameters', s)
- def test_list_resource_types(self):
+ @mock.patch.object(res.Resource, 'is_service_available')
+ def test_list_resource_types(self, mock_is_service_available):
+ mock_is_service_available.return_value = True
resources = self.eng.list_resource_types(self.ctx)
self.assertIsInstance(resources, list)
self.assertIn('AWS::EC2::Instance', resources)
self.assertIn('AWS::RDS::DBInstance', resources)
- def test_list_resource_types_deprecated(self):
+ @mock.patch.object(res.Resource, 'is_service_available')
+ def test_list_resource_types_deprecated(self,
+ mock_is_service_available):
+ mock_is_service_available.return_value = True
resources = self.eng.list_resource_types(self.ctx, "DEPRECATED")
self.assertEqual(set(['OS::Neutron::RouterGateway',
'OS::Heat::HARestarter',
@@ -2185,7 +1983,10 @@ class StackServiceTest(common.HeatTestCase):
'OS::Heat::StructuredDeployments']),
set(resources))
- def test_list_resource_types_supported(self):
+ @mock.patch.object(res.Resource, 'is_service_available')
+ def test_list_resource_types_supported(self,
+ mock_is_service_available):
+ mock_is_service_available.return_value = True
resources = self.eng.list_resource_types(self.ctx, "SUPPORTED")
self.assertNotIn(['OS::Neutron::RouterGateway'], resources)
self.assertIn('AWS::EC2::Instance', resources)
@@ -2212,6 +2013,15 @@ class StackServiceTest(common.HeatTestCase):
{'version': 'c.d', 'type': 'hot'}]
self.assertEqual(expected, templates)
+ @mock.patch.object(res.Resource, 'is_service_available')
+ def test_list_resource_types_unavailable(
+ self,
+ mock_is_service_available):
+ mock_is_service_available.return_value = False
+ resources = self.eng.list_resource_types(self.ctx)
+ # Check that a known resource is not listed
+ self.assertNotIn('OS::Nova::Server', resources)
+
def test_resource_schema(self):
type_name = 'ResourceWithPropsType'
expected = {
@@ -3112,10 +2922,10 @@ class StackServiceTest(common.HeatTestCase):
mock_get_all.assert_called_once_with(self.ctx,
filters=filters,
tenant_safe=False)
- mock_stack_load.assert_call_once_with(self.ctx,
- stack=db_stack,
- use_stored_context=True)
- mock_thread.start_with_acquired_lock.assert_call_once_with(
- fake_stack, fake_stack.state_set, fake_stack.action,
- parser.Stack.FAILED, 'Engine went down during stack CREATE'
+ mock_stack_load.assert_called_once_with(self.ctx,
+ stack=db_stack,
+ use_stored_context=True)
+ mock_thread.start_with_acquired_lock.assert_called_once_with(
+ fake_stack, fake_lock, fake_stack.state_set, fake_stack.action,
+ fake_stack.FAILED, 'Engine went down during stack CREATE'
)
diff --git a/heat/tests/test_engine_worker.py b/heat/tests/test_engine_worker.py
index de0f48c5b..87aa62f6e 100644
--- a/heat/tests/test_engine_worker.py
+++ b/heat/tests/test_engine_worker.py
@@ -175,21 +175,54 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
- @mock.patch.object(resource.Resource, 'make_replacement')
+ @mock.patch.object(worker.WorkerService, '_try_steal_engine_lock')
def test_is_update_traversal_raise_update_inprogress(
- self, mock_mr, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
+ self, mock_tsl, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
mock_cru.side_effect = resource.UpdateInProgress
+ self.worker.engine_id = 'some-thing-else'
+ mock_tsl.return_value = True
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update)
mock_cru.assert_called_once_with(self.resource,
self.resource.stack.t.id,
{}, self.worker.engine_id)
- self.assertFalse(mock_mr.called)
self.assertFalse(mock_crc.called)
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
+ def test_try_steal_lock_alive(
+ self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
+ res = self.worker._try_steal_engine_lock(self.ctx,
+ self.resource.id)
+ self.assertFalse(res)
+
+ @mock.patch.object(worker.listener_client, 'EngineListenerClient')
+ @mock.patch.object(worker.resource_objects.Resource, 'get_obj')
+ def test_try_steal_lock_dead(
+ self, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
+ mock_csc, mock_cid):
+ fake_res = mock.Mock()
+ fake_res.engine_id = 'some-thing-else'
+ mock_get.return_value = fake_res
+ mock_elc.return_value.is_alive.return_value = False
+ res = self.worker._try_steal_engine_lock(self.ctx,
+ self.resource.id)
+ self.assertTrue(res)
+
+ @mock.patch.object(worker.listener_client, 'EngineListenerClient')
+ @mock.patch.object(worker.resource_objects.Resource, 'get_obj')
+ def test_try_steal_lock_not_dead(
+ self, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
+ mock_csc, mock_cid):
+ fake_res = mock.Mock()
+ fake_res.engine_id = self.worker.engine_id
+ mock_get.return_value = fake_res
+ mock_elc.return_value.is_alive.return_value = True
+ res = self.worker._try_steal_engine_lock(self.ctx,
+ self.resource.id)
+ self.assertFalse(res)
+
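
    The three lock-stealing tests describe _try_steal_engine_lock: load the
    resource row and report the lock as stealable only when it is held by another
    engine that the listener client says is dead. A sketch consistent with those
    assertions, following the names the tests patch on the worker module (the real
    method may do more, e.g. clear the stale engine_id before returning True):

        def _try_steal_engine_lock(self, cnxt, resource_id):
            rs_obj = resource_objects.Resource.get_obj(cnxt, resource_id)
            if rs_obj.engine_id in (None, self.engine_id):
                # Not locked, or locked by us: nothing to steal.
                return False
            listener = listener_client.EngineListenerClient(rs_obj.engine_id)
            # Stealable only if the owning engine is no longer alive.
            return not listener.is_alive(cnxt)
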
def test_resource_update_failure_sets_stack_state_as_failed(
self, mock_cru, mock_crc, mock_pcr, mock_csc, mock_cid):
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
@@ -203,7 +236,8 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
self.is_update)
s = self.stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
- self.assertEqual(u'ResourceNotAvailable: resources.A: The Resource (A)'
+ self.assertEqual('Resource UPDATE failed: '
+ 'ResourceNotAvailable: resources.A: The Resource (A)'
' is not available.', s.status_reason)
def test_resource_cleanup_failure_sets_stack_state_as_failed(
@@ -220,7 +254,8 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
self.is_update)
s = self.stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
- self.assertEqual(u'ResourceNotAvailable: resources.A: The Resource (A)'
+ self.assertEqual('Resource UPDATE failed: '
+ 'ResourceNotAvailable: resources.A: The Resource (A)'
' is not available.', s.status_reason)
def test_resource_update_failure_triggers_rollback_if_enabled(
diff --git a/heat/tests/test_environment.py b/heat/tests/test_environment.py
index d182780f7..c78ad806b 100644
--- a/heat/tests/test_environment.py
+++ b/heat/tests/test_environment.py
@@ -172,6 +172,30 @@ class EnvironmentTest(common.HeatTestCase):
env.get_resource_info('OS::Networking::FloatingIP',
'my_fip').value)
+ def test_register_with_path(self):
+ yaml_env = '''
+ resource_registry:
+ test::one: a.yaml
+ resources:
+ res_x:
+ test::two: b.yaml
+'''
+
+ env = environment.Environment(environment_format.parse(yaml_env))
+ self.assertEqual('a.yaml', env.get_resource_info('test::one').value)
+ self.assertEqual('b.yaml',
+ env.get_resource_info('test::two', 'res_x').value)
+
+ env2 = environment.Environment()
+ env2.register_class('test::one',
+ 'a.yaml',
+ path=['test::one'])
+ env2.register_class('test::two',
+ 'b.yaml',
+ path=['resources', 'res_x', 'test::two'])
+
+ self.assertEqual(env.user_env_as_dict(), env2.user_env_as_dict())
+
def test_constraints(self):
env = environment.Environment({})
diff --git a/heat/tests/test_hot.py b/heat/tests/test_hot.py
index fcf3dbeb2..b0349f4a5 100644
--- a/heat/tests/test_hot.py
+++ b/heat/tests/test_hot.py
@@ -746,6 +746,51 @@ class HOTemplateTest(common.HeatTestCase):
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Algorithm must be one of', six.text_type(exc))
+ def test_str_split(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',', 'bar,baz']}
+ snippet_resolved = ['bar', 'baz']
+ self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
+
+ def test_str_split_index(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',', 'bar,baz', 1]}
+ snippet_resolved = 'baz'
+ self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
+
+ def test_str_split_index_str(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',', 'bar,baz', '1']}
+ snippet_resolved = 'baz'
+ self.assertEqual(snippet_resolved, self.resolve(snippet, tmpl))
+
+ def test_str_split_index_bad(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',', 'bar,baz', 'bad']}
+ exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
+ self.assertIn('Incorrect index to \"str_split\"', six.text_type(exc))
+
+ def test_str_split_index_out_of_range(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',', 'bar,baz', '2']}
+ exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
+ expected = 'Incorrect index to \"str_split\" should be between 0 and 1'
+ self.assertEqual(expected, six.text_type(exc))
+
+ def test_str_split_bad_novalue(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': [',']}
+ exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
+ self.assertIn('Incorrect arguments to \"str_split\"',
+ six.text_type(exc))
+
+ def test_str_split_bad_empty(self):
+ tmpl = template.Template(hot_liberty_tpl_empty)
+ snippet = {'str_split': []}
+ exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
+ self.assertIn('Incorrect arguments to \"str_split\"',
+ six.text_type(exc))
+
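
    These cases spell out the new str_split intrinsic used with the Liberty HOT
    version: the arguments are a delimiter, the string to split, and an optional
    index into the resulting list; missing arguments or a bad/out-of-range index
    raise ValueError. A stand-alone model of that behaviour, for illustration only
    (not Heat's function class):

        def str_split(args):
            if not isinstance(args, list) or len(args) not in (2, 3):
                raise ValueError('Incorrect arguments to "str_split"')
            parts = args[1].split(args[0])
            if len(args) == 2:
                return parts
            try:
                index = int(args[2])
            except (TypeError, ValueError):
                raise ValueError('Incorrect index to "str_split"')
            if not 0 <= index < len(parts):
                raise ValueError('Incorrect index to "str_split" should be '
                                 'between 0 and %s' % (len(parts) - 1))
            return parts[index]

        assert str_split([',', 'bar,baz']) == ['bar', 'baz']
        assert str_split([',', 'bar,baz', '1']) == 'baz'
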
def test_prevent_parameters_access(self):
"""
Test that the parameters section can't be accessed using the template
diff --git a/heat/tests/test_magnum_baymodel.py b/heat/tests/test_magnum_baymodel.py
index 92efe0ab2..c98f80abb 100644
--- a/heat/tests/test_magnum_baymodel.py
+++ b/heat/tests/test_magnum_baymodel.py
@@ -43,18 +43,25 @@ magnum_template = '''
RESOURCE_TYPE = 'OS::Magnum::BayModel'
+class MagnumBayModelTestResource(baymodel.BayModel):
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
+
class TestMagnumBayModel(common.HeatTestCase):
def setUp(self):
super(TestMagnumBayModel, self).setUp()
self.ctx = utils.dummy_context()
- resource._register_class(RESOURCE_TYPE, baymodel.BayModel)
+ resource._register_class(RESOURCE_TYPE, MagnumBayModelTestResource)
t = template_format.parse(magnum_template)
self.stack = utils.parse_stack(t)
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['test_baymodel']
self.client = mock.Mock()
- self.patchobject(baymodel.BayModel, 'client', return_value=self.client)
+ self.patchobject(MagnumBayModelTestResource, 'client',
+ return_value=self.client)
self.stub_FlavorConstraint_validate()
self.stub_KeypairConstraint_validate()
self.stub_ImageConstraint_validate()
@@ -65,7 +72,7 @@ class TestMagnumBayModel(common.HeatTestCase):
self.test_bay_model = self.stack['test_baymodel']
value = mock.MagicMock(uuid=self.resource_id)
self.client.baymodels.create.return_value = value
- bm = baymodel.BayModel(name, snippet, stack)
+ bm = MagnumBayModelTestResource(name, snippet, stack)
scheduler.TaskRunner(bm.create)()
return bm
diff --git a/heat/tests/test_metadata_refresh.py b/heat/tests/test_metadata_refresh.py
index ee898f868..1e2dfcc27 100644
--- a/heat/tests/test_metadata_refresh.py
+++ b/heat/tests/test_metadata_refresh.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+import mock
import mox
from heat.common import identifier
@@ -239,7 +239,9 @@ class WaitCondMetadataUpdateTest(common.HeatTestCase):
self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
return stack
- def test_wait_meta(self):
+ @mock.patch(('heat.engine.resources.aws.ec2.instance.Instance'
+ '.is_service_available'))
+ def test_wait_meta(self, mock_is_service_available):
'''
1 create stack
2 assert empty instance metadata
@@ -247,6 +249,7 @@ class WaitCondMetadataUpdateTest(common.HeatTestCase):
4 assert valid waitcond metadata
5 assert valid instance metadata
'''
+ mock_is_service_available.return_value = True
self.stack = self.create_stack()
watch = self.stack['WC']
diff --git a/heat/tests/test_parameters.py b/heat/tests/test_parameters.py
index 6851c72c6..07b090413 100644
--- a/heat/tests/test_parameters.py
+++ b/heat/tests/test_parameters.py
@@ -297,6 +297,13 @@ class ParameterTestSpecific(common.HeatTestCase):
schema['Default'] = 'baz,foo,bar'
p = new_parameter('p', schema)
self.assertEqual('baz,foo,bar'.split(','), p.value())
+ schema['AllowedValues'] = ['1', '3', '5']
+ schema['Default'] = []
+ p = new_parameter('p', schema, [1, 3, 5])
+ self.assertEqual('1,3,5', str(p))
+ schema['Default'] = [1, 3, 5]
+ p = new_parameter('p', schema)
+ self.assertEqual('1,3,5'.split(','), p.value())
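
    The added assertions show that a CommaDelimitedList parameter now accepts a
    native list both as a value and as a default: the list is joined with commas
    for the string form and split back into a list by value(). In plain Python the
    round trip the test expects looks like this (the parameter class performs the
    conversion internally):

        default = [1, 3, 5]
        as_string = ','.join(str(v) for v in default)   # str(p)    -> '1,3,5'
        as_list = as_string.split(',')                  # p.value() -> ['1', '3', '5']
        assert as_string == '1,3,5'
        assert as_list == ['1', '3', '5']
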
def test_list_value_list_bad(self):
schema = {'Type': 'CommaDelimitedList',
diff --git a/heat/tests/test_properties.py b/heat/tests/test_properties.py
index 52cbb3d4e..0f4a2c8ef 100644
--- a/heat/tests/test_properties.py
+++ b/heat/tests/test_properties.py
@@ -343,7 +343,7 @@ class PropertySchemaTest(common.HeatTestCase):
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
- self.assertIsNone(schema.default)
+ self.assertEqual("m1.large", schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
@@ -368,7 +368,7 @@ class PropertySchemaTest(common.HeatTestCase):
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
- self.assertIsNone(schema.default)
+ self.assertEqual("m1.large", schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
@@ -394,7 +394,7 @@ class PropertySchemaTest(common.HeatTestCase):
self.assertEqual(properties.Schema.STRING, schema.type)
self.assertEqual(description, schema.description)
- self.assertIsNone(schema.default)
+ self.assertEqual("m1.large", schema.default)
self.assertFalse(schema.required)
self.assertEqual(2, len(schema.constraints))
@@ -481,7 +481,7 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual(default, schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
@@ -501,7 +501,7 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual(default, schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
@@ -522,7 +522,7 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual(default, schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
@@ -544,7 +544,7 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.NUMBER, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual(default, schema.default)
self.assertFalse(schema.required)
self.assertEqual(1, len(schema.constraints))
self.assertFalse(schema.allow_conversion)
@@ -563,7 +563,7 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.LIST, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual("foo,bar,baz", schema.default)
self.assertFalse(schema.required)
self.assertFalse(schema.allow_conversion)
@@ -576,7 +576,8 @@ class PropertySchemaTest(common.HeatTestCase):
schema = properties.Schema.from_parameter(param)
self.assertEqual(properties.Schema.MAP, schema.type)
- self.assertIsNone(schema.default)
+ self.assertEqual({"foo": "bar", "blarg": "wibble"},
+ schema.default)
self.assertFalse(schema.required)
self.assertTrue(schema.allow_conversion)
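
    Each of these assertions flips from assertIsNone(schema.default) to the
    parameter's own default, i.e. building a property schema from a parameter now
    appears to preserve the default instead of dropping it. A minimal illustration
    of the changed expectation (not Heat's implementation):

        param = {'Type': 'String', 'Default': 'm1.large',
                 'Description': 'WebServer EC2 instance type'}

        prop_schema = {
            'type': 'string',
            'description': param['Description'],
            'default': param.get('Default'),   # previously always None here
            'required': False,
        }
        assert prop_schema['default'] == 'm1.large'
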
@@ -1207,6 +1208,7 @@ class PropertiesTest(common.HeatTestCase):
"DBUsername": {
"type": "string",
"description": "The WordPress database admin account username",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1222,6 +1224,7 @@ class PropertiesTest(common.HeatTestCase):
"LinuxDistribution": {
"type": "string",
"description": "Distribution of choice",
+ "default": "F17",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1233,6 +1236,7 @@ class PropertiesTest(common.HeatTestCase):
"InstanceType": {
"type": "string",
"description": "WebServer EC2 instance type",
+ "default": "m1.large",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1253,6 +1257,7 @@ class PropertiesTest(common.HeatTestCase):
"DBRootPassword": {
"type": "string",
"description": "Root password for MySQL",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1276,6 +1281,7 @@ class PropertiesTest(common.HeatTestCase):
"DBPassword": {
"type": "string",
"description": "The WordPress database admin account password",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1291,6 +1297,7 @@ class PropertiesTest(common.HeatTestCase):
"DBName": {
"type": "string",
"description": "The WordPress database name",
+ "default": "wordpress",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1404,6 +1411,7 @@ class PropertiesTest(common.HeatTestCase):
"InstanceType": {
"type": "string",
"description": "WebServer EC2 instance type",
+ "default": "m1.large",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1416,6 +1424,7 @@ class PropertiesTest(common.HeatTestCase):
]
},
"LinuxDistribution": {
+ "default": "F17",
"type": "string",
"description": "Distribution of choice",
"required": False,
@@ -1430,6 +1439,7 @@ class PropertiesTest(common.HeatTestCase):
"DBName": {
"type": "string",
"description": "The WordPress database name",
+ "default": "wordpress",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1444,6 +1454,7 @@ class PropertiesTest(common.HeatTestCase):
"DBUsername": {
"type": "string",
"description": "The WordPress database admin account username",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1458,6 +1469,7 @@ class PropertiesTest(common.HeatTestCase):
"DBPassword": {
"type": "string",
"description": "The WordPress database admin account password",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
@@ -1472,6 +1484,7 @@ class PropertiesTest(common.HeatTestCase):
"DBRootPassword": {
"type": "string",
"description": "Root password for MySQL",
+ "default": "admin",
"required": False,
'update_allowed': True,
'immutable': False,
diff --git a/heat/tests/test_provider_template.py b/heat/tests/test_provider_template.py
index b475161af..5fed02a73 100644
--- a/heat/tests/test_provider_template.py
+++ b/heat/tests/test_provider_template.py
@@ -903,7 +903,7 @@ class TemplateResourceCrudTest(common.HeatTestCase):
self.res.handle_create()
self.res.create_with_template.assert_called_once_with(
- self.provider, {'Foo': 'bar'})
+ self.provider, {'Foo': 'bar', 'Blarg': 'wibble'})
def test_handle_adopt(self):
self.res.create_with_template = mock.Mock(return_value=None)
@@ -911,7 +911,8 @@ class TemplateResourceCrudTest(common.HeatTestCase):
self.res.handle_adopt(resource_data={'resource_id': 'fred'})
self.res.create_with_template.assert_called_once_with(
- self.provider, {'Foo': 'bar'}, adopt_data={'resource_id': 'fred'})
+ self.provider, {'Foo': 'bar', 'Blarg': 'wibble'},
+ adopt_data={'resource_id': 'fred'})
def test_handle_update(self):
self.res.update_with_template = mock.Mock(return_value=None)
@@ -919,7 +920,7 @@ class TemplateResourceCrudTest(common.HeatTestCase):
self.res.handle_update(self.defn, None, None)
self.res.update_with_template.assert_called_once_with(
- self.provider, {'Foo': 'bar'})
+ self.provider, {'Foo': 'bar', 'Blarg': 'wibble'})
def test_handle_delete(self):
self.res.rpc_client = mock.MagicMock()
diff --git a/heat/tests/test_resource.py b/heat/tests/test_resource.py
index 8603e19e1..592da4220 100644
--- a/heat/tests/test_resource.py
+++ b/heat/tests/test_resource.py
@@ -28,6 +28,7 @@ from heat.common import timeutils
from heat.db import api as db_api
from heat.engine import attributes
from heat.engine.cfn import functions as cfn_funcs
+from heat.engine import clients
from heat.engine import constraints
from heat.engine import dependencies
from heat.engine import environment
@@ -979,7 +980,7 @@ class ResourceTest(common.HeatTestCase):
scheduler.TaskRunner(res.resume)()
self.assertEqual((res.RESUME, res.COMPLETE), res.state)
- def test_suspend_fail_inprogress(self):
+ def test_suspend_fail_invalid_states(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
'GenericResourceType',
{'Foo': 'abc'})
@@ -987,19 +988,19 @@ class ResourceTest(common.HeatTestCase):
scheduler.TaskRunner(res.create)()
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
- res.state_set(res.CREATE, res.IN_PROGRESS)
- suspend = scheduler.TaskRunner(res.suspend)
- self.assertRaises(exception.ResourceFailure, suspend)
+ invalid_actions = (a for a in res.ACTIONS if a != res.SUSPEND)
+ invalid_status = (s for s in res.STATUSES if s != res.COMPLETE)
+ invalid_states = [s for s in
+ itertools.product(invalid_actions, invalid_status)]
- res.state_set(res.UPDATE, res.IN_PROGRESS)
- suspend = scheduler.TaskRunner(res.suspend)
- self.assertRaises(exception.ResourceFailure, suspend)
-
- res.state_set(res.DELETE, res.IN_PROGRESS)
- suspend = scheduler.TaskRunner(res.suspend)
- self.assertRaises(exception.ResourceFailure, suspend)
+ for state in invalid_states:
+ res.state_set(*state)
+ suspend = scheduler.TaskRunner(res.suspend)
+ expected = 'State %s invalid for suspend' % six.text_type(state)
+ exc = self.assertRaises(exception.ResourceFailure, suspend)
+ self.assertIn(expected, six.text_type(exc))
- def test_resume_fail_not_suspend_complete(self):
+ def test_resume_fail_invalid_states(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
'GenericResourceType',
{'Foo': 'abc'})
@@ -1007,13 +1008,17 @@ class ResourceTest(common.HeatTestCase):
scheduler.TaskRunner(res.create)()
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
- non_suspended_states = [s for s in
- itertools.product(res.ACTIONS, res.STATUSES)
- if s != (res.SUSPEND, res.COMPLETE)]
- for state in non_suspended_states:
+ invalid_states = [s for s in
+ itertools.product(res.ACTIONS, res.STATUSES)
+ if s not in ((res.SUSPEND, res.COMPLETE),
+ (res.RESUME, res.FAILED),
+ (res.RESUME, res.COMPLETE))]
+ for state in invalid_states:
res.state_set(*state)
resume = scheduler.TaskRunner(res.resume)
- self.assertRaises(exception.ResourceFailure, resume)
+ expected = 'State %s invalid for resume' % six.text_type(state)
+ exc = self.assertRaises(exception.ResourceFailure, resume)
+ self.assertIn(expected, six.text_type(exc))
def test_suspend_fail_exception(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
@@ -1427,13 +1432,31 @@ class ResourceTest(common.HeatTestCase):
res = generic_rsrc.GenericResource('test_res', tmpl, self.stack)
res._store()
self._assert_resource_lock(res.id, None, None)
- res.create_convergence('template_key', {(1, True): {},
- (1, True): {}},
- 'engine-007')
+ res_data = {(1, True): {u'id': 1, u'name': 'A', 'attrs': {}},
+ (2, True): {u'id': 3, u'name': 'B', 'attrs': {}}}
+ res.create_convergence(res_data, 'engine-007')
mock_create.assert_called_once_with()
- self.assertEqual('template_key', res.current_template_id)
- self.assertEqual([1], res.requires)
+ self.assertItemsEqual([1, 3], res.requires)
+ self._assert_resource_lock(res.id, None, 2)
+
+ def test_create_convergence_sets_requires_for_failure(self):
+ '''
+ Ensure that requires are computed correctly even if resource
+ create fails.
+ '''
+ tmpl = rsrc_defn.ResourceDefinition('test_res', 'Foo')
+ res = generic_rsrc.GenericResource('test_res', tmpl, self.stack)
+ res._store()
+ dummy_ex = exception.ResourceNotAvailable(resource_name=res.name)
+ res.create = mock.Mock(side_effect=dummy_ex)
+ self._assert_resource_lock(res.id, None, None)
+ res_data = {(1, True): {u'id': 5, u'name': 'A', 'attrs': {}},
+ (2, True): {u'id': 3, u'name': 'B', 'attrs': {}}}
+ self.assertRaises(exception.ResourceNotAvailable,
+ res.create_convergence, res_data,
+ 'engine-007')
+ self.assertItemsEqual([5, 3], res.requires)
self._assert_resource_lock(res.id, None, 2)
@mock.patch.object(resource.Resource, 'update')
@@ -1453,46 +1476,46 @@ class ResourceTest(common.HeatTestCase):
}}, env=self.env)
new_temp.store()
- res.update_convergence(new_temp.id, {(1, True): {},
- (1, True): {}}, 'engine-007')
+ res_data = {(1, True): {u'id': 4, u'name': 'A', 'attrs': {}},
+ (2, True): {u'id': 3, u'name': 'B', 'attrs': {}}}
+ res.update_convergence(new_temp.id, res_data, 'engine-007')
mock_update.assert_called_once_with(
new_temp.resource_definitions(self.stack)[res.name])
self.assertEqual(new_temp.id, res.current_template_id)
- self.assertEqual([1, 2], res.requires)
+ self.assertItemsEqual([3, 4], res.requires)
self._assert_resource_lock(res.id, None, 2)
def test_update_in_progress_convergence(self):
tmpl = rsrc_defn.ResourceDefinition('test_res', 'Foo')
res = generic_rsrc.GenericResource('test_res', tmpl, self.stack)
+ res.requires = [1, 2]
res._store()
rs = resource_objects.Resource.get_obj(self.stack.context, res.id)
rs.update_and_save({'engine_id': 'not-this'})
self._assert_resource_lock(res.id, 'not-this', None)
+ res_data = {(1, True): {u'id': 4, u'name': 'A', 'attrs': {}},
+ (2, True): {u'id': 3, u'name': 'B', 'attrs': {}}}
ex = self.assertRaises(resource.UpdateInProgress,
res.update_convergence,
'template_key',
- {}, 'engine-007')
+ res_data, 'engine-007')
msg = ("The resource %s is already being updated." %
res.name)
self.assertEqual(msg, six.text_type(ex))
+ # ensure requirements are not updated for failed resource
+ self.assertEqual([1, 2], res.requires)
- @mock.patch.object(resource.Resource, 'delete')
- def test_delete_convergence(self, mock_delete):
+ def test_delete_convergence(self):
tmpl = rsrc_defn.ResourceDefinition('test_res', 'Foo')
res = generic_rsrc.GenericResource('test_res', tmpl, self.stack)
res.requires = [1, 2]
res._store()
+ res.destroy = mock.Mock()
self._assert_resource_lock(res.id, None, None)
- res.delete_convergence('template_key', {(1, True): {},
- (1, True): {}},
- 'engine-007')
-
- mock_delete.assert_called_once_with()
- self.assertEqual('template_key', res.current_template_id)
- self.assertEqual([2], res.requires)
- self._assert_resource_lock(res.id, None, 2)
+ res.delete_convergence('engine-007')
+ self.assertTrue(res.destroy.called)
def test_delete_in_progress_convergence(self):
tmpl = rsrc_defn.ResourceDefinition('test_res', 'Foo')
@@ -1503,8 +1526,7 @@ class ResourceTest(common.HeatTestCase):
self._assert_resource_lock(res.id, 'not-this', None)
ex = self.assertRaises(resource.UpdateInProgress,
res.delete_convergence,
- 'template_key',
- {}, 'engine-007')
+ 'engine-007')
msg = ("The resource %s is already being updated." %
res.name)
self.assertEqual(msg, six.text_type(ex))
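
    With this change delete_convergence takes only the engine id; the two tests
    expect it to raise UpdateInProgress when another engine already holds the
    resource lock and otherwise to destroy the resource. A much-simplified sketch
    of that contract (the lock helper name below is hypothetical):

        def delete_convergence(self, engine_id):
            if not self._acquire_lock(engine_id):   # hypothetical helper name
                raise UpdateInProgress(self.name)
            self.destroy()
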
@@ -2263,3 +2285,127 @@ class ResourceHookTest(common.HeatTestCase):
res.has_hook = mock.Mock(return_value=False)
self.assertRaises(exception.ResourceActionNotSupported,
res.signal, {'unset_hook': 'pre-create'})
+
+
+class ResourceAvailabilityTest(common.HeatTestCase):
+ def _mock_client_plugin(self, service_types=[], is_available=True):
+ mock_client_plugin = mock.Mock()
+ mock_service_types = mock.PropertyMock(return_value=service_types)
+ type(mock_client_plugin).service_types = mock_service_types
+ mock_client_plugin.does_endpoint_exist = mock.Mock(
+ return_value=is_available)
+ return mock_service_types, mock_client_plugin
+
+ def test_default_true_with_default_client_name_none(self):
+ '''
+ When default_client_name is None, the resource is considered available.
+ '''
+ with mock.patch(('heat.tests.generic_resource'
+ '.ResourceWithDefaultClientName.default_client_name'),
+ new_callable=mock.PropertyMock) as mock_client_name:
+ mock_client_name.return_value = None
+ self.assertTrue((generic_rsrc.ResourceWithDefaultClientName.
+ is_service_available(context=mock.Mock())))
+
+ @mock.patch.object(clients.OpenStackClients, 'client_plugin')
+ def test_default_true_empty_service_types(
+ self,
+ mock_client_plugin_method):
+ '''
+ When service_types is an empty list, the resource is considered available.
+ '''
+
+ mock_service_types, mock_client_plugin = self._mock_client_plugin()
+ mock_client_plugin_method.return_value = mock_client_plugin
+
+ self.assertTrue(
+ generic_rsrc.ResourceWithDefaultClientName.is_service_available(
+ context=mock.Mock()))
+ mock_client_plugin_method.assert_called_once_with(
+ generic_rsrc.ResourceWithDefaultClientName.default_client_name)
+ mock_service_types.assert_called_once_with()
+
+ @mock.patch.object(clients.OpenStackClients, 'client_plugin')
+ def test_service_deployed(
+ self,
+ mock_client_plugin_method):
+ '''
+ When the service is deployed, the resource is considered available.
+ '''
+
+ mock_service_types, mock_client_plugin = self._mock_client_plugin(
+ ['test_type']
+ )
+ mock_client_plugin_method.return_value = mock_client_plugin
+
+ self.assertTrue(
+ generic_rsrc.ResourceWithDefaultClientName.is_service_available(
+ context=mock.Mock()))
+ mock_client_plugin_method.assert_called_once_with(
+ generic_rsrc.ResourceWithDefaultClientName.default_client_name)
+ mock_service_types.assert_called_once_with()
+ mock_client_plugin.does_endpoint_exist.assert_called_once_with(
+ service_type='test_type',
+ service_name=(generic_rsrc.ResourceWithDefaultClientName
+ .default_client_name)
+ )
+
+ @mock.patch.object(clients.OpenStackClients, 'client_plugin')
+ def test_service_not_deployed(
+ self,
+ mock_client_plugin_method):
+ '''
+ When the service is not deployed, the resource is considered
+ unavailable.
+ '''
+
+ mock_service_types, mock_client_plugin = self._mock_client_plugin(
+ ['test_type_un_deployed'],
+ False
+ )
+ mock_client_plugin_method.return_value = mock_client_plugin
+
+ self.assertFalse(
+ generic_rsrc.ResourceWithDefaultClientName.is_service_available(
+ context=mock.Mock()))
+ mock_client_plugin_method.assert_called_once_with(
+ generic_rsrc.ResourceWithDefaultClientName.default_client_name)
+ mock_service_types.assert_called_once_with()
+ mock_client_plugin.does_endpoint_exist.assert_called_once_with(
+ service_type='test_type_un_deployed',
+ service_name=(generic_rsrc.ResourceWithDefaultClientName
+ .default_client_name)
+ )
+
+ def test_service_not_deployed_throws_exception(self):
+ '''
+ When the service is not deployed, make sure the resource raises a
+ StackResourceUnavailable exception.
+ '''
+ with mock.patch.object(
+ generic_rsrc.ResourceWithDefaultClientName,
+ 'is_service_available') as mock_method:
+ mock_method.return_value = False
+
+ definition = rsrc_defn.ResourceDefinition(
+ name='Test Resource',
+ resource_type=mock.Mock())
+
+ mock_stack = mock.MagicMock()
+
+ ex = self.assertRaises(
+ exception.StackResourceUnavailable,
+ generic_rsrc.ResourceWithDefaultClientName.__new__,
+ cls=generic_rsrc.ResourceWithDefaultClientName,
+ name='test_stack',
+ definition=definition,
+ stack=mock_stack)
+
+ msg = ('Service sample does not have required endpoint in service'
+ ' catalog for the resource test_stack')
+ self.assertEqual(msg,
+ six.text_type(ex),
+ 'invalid exception message')
+
+ # Make sure is_service_available is called on the right class
+ mock_method.assert_called_once_with(mock_stack.context)
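
    Read together, the availability tests pin down is_service_available: a
    resource with no default_client_name is always available, an empty
    service_types list also counts as available, and otherwise the client
    plugin's does_endpoint_exist() decides; __new__ then raises
    StackResourceUnavailable when the check returns False. A sketch of the
    classmethod consistent with the calls asserted above (details of the real
    implementation may differ):

        @classmethod
        def is_service_available(cls, context):
            if cls.default_client_name is None:
                return True
            client_plugin = clients.OpenStackClients(context).client_plugin(
                cls.default_client_name)
            service_types = client_plugin.service_types
            if not service_types:
                return True
            # Only the first service type is checked in the tests above.
            return client_plugin.does_endpoint_exist(
                service_type=service_types[0],
                service_name=cls.default_client_name)
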
diff --git a/heat/tests/test_rpc_client.py b/heat/tests/test_rpc_client.py
index e0789acc5..646d7af06 100644
--- a/heat/tests/test_rpc_client.py
+++ b/heat/tests/test_rpc_client.py
@@ -230,7 +230,8 @@ class EngineRpcAPITestCase(common.HeatTestCase):
def test_list_stack_resources(self):
self._test_engine_api('list_stack_resources', 'call',
stack_identity=self.identity,
- nested_depth=0)
+ nested_depth=0,
+ with_detail=False)
def test_stack_suspend(self):
self._test_engine_api('stack_suspend', 'call',
diff --git a/heat/tests/test_software_deployment.py b/heat/tests/test_software_deployment.py
index 92a1159af..b746092b3 100644
--- a/heat/tests/test_software_deployment.py
+++ b/heat/tests/test_software_deployment.py
@@ -163,10 +163,10 @@ class SoftwareDeploymentTest(common.HeatTestCase):
self.patchobject(sd.SoftwareDeployment, '_create_user')
self.patchobject(sd.SoftwareDeployment, '_create_keypair')
self.patchobject(sd.SoftwareDeployment, '_delete_user')
- self.patchobject(sd.SoftwareDeployment, '_delete_signed_url')
- get_signed_url = self.patchobject(
- sd.SoftwareDeployment, '_get_signed_url')
- get_signed_url.return_value = 'http://192.0.2.2/signed_url'
+ self.patchobject(sd.SoftwareDeployment, '_delete_ec2_signed_url')
+ get_ec2_signed_url = self.patchobject(
+ sd.SoftwareDeployment, '_get_ec2_signed_url')
+ get_ec2_signed_url.return_value = 'http://192.0.2.2/signed_url'
self.deployment = self.stack['deployment_mysql']
@@ -1105,7 +1105,7 @@ class SoftwareDeploymentTest(common.HeatTestCase):
self.deployment.uuid = str(uuid.uuid4())
self.deployment._delete_queue()
zc.queue.assert_called_once_with(queue_id)
- zc.queue.delete.assert_called_once()
+ self.assertTrue(zc.queue(self.deployment.uuid).delete.called)
self.assertEqual(
[mock.call('signal_queue_id')],
self.deployment.data_delete.mock_calls)
diff --git a/heat/tests/test_stack.py b/heat/tests/test_stack.py
index 685b0d29e..b94e6fe05 100644
--- a/heat/tests/test_stack.py
+++ b/heat/tests/test_stack.py
@@ -1987,7 +1987,7 @@ class StackTest(common.HeatTestCase):
db_tpl = db_api.raw_template_get(self.ctx, self.stack.t.id)
db_params = db_tpl.environment['parameters']
self.assertEqual('foo', db_params['param1'])
- self.assertEqual('oslo_decrypt_v1', db_params['param2'][0])
+ self.assertEqual('cryptography_decrypt_v1', db_params['param2'][0])
self.assertIsNotNone(db_params['param2'][1])
# Verify that loaded stack has decrypted paramters
diff --git a/heat/tests/test_stack_delete.py b/heat/tests/test_stack_delete.py
index e839b7349..c22af1092 100644
--- a/heat/tests/test_stack_delete.py
+++ b/heat/tests/test_stack_delete.py
@@ -414,7 +414,7 @@ class StackTest(common.HeatTestCase):
self.stack.delete()
- mock_rd.assert_called_once()
+ mock_rd.assert_called_once_with()
self.assertEqual((self.stack.DELETE, self.stack.FAILED),
self.stack.state)
self.assertEqual('Resource DELETE failed: Exception: '
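
    The switch from assert_called_once() to assert_called_once_with() here, and to
    assertTrue(...called) in the zaqar queue test earlier, fixes a classic mock
    pitfall: on the mock releases of this era, assert_called_once was not a real
    assertion method, just another auto-created attribute, so calling it could
    never fail. A quick illustration:

        import mock

        m = mock.Mock()
        m('arg')

        # Real assertion: fails if the call count or arguments are wrong.
        m.assert_called_once_with('arg')

        # Historically dangerous: on old mock releases the line below was just
        # an auto-created child mock, so it "passed" even if m had never been
        # called at all.
        # m.assert_called_once()
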
diff --git a/heat_integrationtests/README.rst b/heat_integrationtests/README.rst
index 3d77d09e1..047788874 100644
--- a/heat_integrationtests/README.rst
+++ b/heat_integrationtests/README.rst
@@ -9,7 +9,7 @@ To run the tests against DevStack, do the following:
# source DevStack credentials
- source /opt/stack/devstack/accrc/demo/demo
+ source /opt/stack/devstack/openrc
# run the heat integration tests with those credentials
diff --git a/heat_integrationtests/functional/test_reload_on_sighup.py b/heat_integrationtests/functional/test_reload_on_sighup.py
new file mode 100644
index 000000000..f987882c0
--- /dev/null
+++ b/heat_integrationtests/functional/test_reload_on_sighup.py
@@ -0,0 +1,98 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+
+from oslo_concurrency import processutils
+from six.moves import configparser
+
+from heat_integrationtests.common import test
+
+
+class ReloadOnSighupTest(test.HeatIntegrationTest):
+
+ def setUp(self):
+ self.config_file = "/etc/heat/heat.conf"
+ super(ReloadOnSighupTest, self).setUp()
+
+ def _set_config_value(self, service, key, value):
+ config = configparser.ConfigParser()
+ config.read(self.config_file)
+ config.set(service, key, value)
+ with open(self.config_file, 'wb') as f:
+ config.write(f)
+
+ def _get_config_value(self, service, key):
+ config = configparser.ConfigParser()
+ config.read(self.config_file)
+ val = config.get(service, key)
+ return val
+
+ def _get_heat_api_pids(self, service):
+ # get the pids of all heat-api processes
+ if service == "heat_api":
+ process = "heat-api|grep -Ev 'grep|cloudwatch|cfn'"
+ else:
+ process = "%s|grep -Ev 'grep'" % service.replace('_', '-')
+ cmd = "ps -ef|grep %s|awk '{print $2}'" % process
+ out, err = processutils.execute(cmd, shell=True)
+ self.assertIsNotNone(out, "heat-api service not running. %s" % err)
+ pids = filter(None, out.split('\n'))
+
+ # get the parent pids of all heat-api processes
+ cmd = "ps -ef|grep %s|awk '{print $3}'" % process
+ out, _ = processutils.execute(cmd, shell=True)
+ parent_pids = filter(None, out.split('\n'))
+
+ heat_api_parent = list(set(pids) & set(parent_pids))[0]
+ heat_api_children = list(set(pids) - set(parent_pids))
+
+ return heat_api_parent, heat_api_children
+
+ def _change_config(self, service, old_workers, new_workers):
+ pre_reload_parent, pre_reload_children = self._get_heat_api_pids(
+ service)
+ self.assertEqual(old_workers, len(pre_reload_children))
+
+ # change the config values
+ self._set_config_value(service, 'workers', new_workers)
+ cmd = "kill -HUP %s" % pre_reload_parent
+ processutils.execute(cmd, shell=True)
+ # wait till heat-api reloads
+ eventlet.sleep(2)
+
+ post_reload_parent, post_reload_children = self._get_heat_api_pids(
+ service)
+ self.assertEqual(pre_reload_parent, post_reload_parent)
+ self.assertEqual(new_workers, len(post_reload_children))
+ # test if all child processes are newly created
+ self.assertEqual(set(post_reload_children) & set(pre_reload_children),
+ set())
+
+ def _reload(self, service):
+ old_workers = int(self._get_config_value(service, 'workers'))
+ new_workers = old_workers + 1
+ self.addCleanup(self._set_config_value, service, 'workers',
+ old_workers)
+
+ self._change_config(service, old_workers, new_workers)
+ # revert all the changes made
+ self._change_config(service, new_workers, old_workers)
+
+ def test_api_reload_on_sighup(self):
+ self._reload('heat_api')
+
+ def test_api_cfn_reload_on_sighup(self):
+ self._reload('heat_api_cfn')
+
+ def test_api_cloudwatch_reload_on_sighup(self):
+ self._reload('heat_api_cloudwatch')
diff --git a/heat_integrationtests/functional/test_stack_tags.py b/heat_integrationtests/functional/test_stack_tags.py
index a183d2500..cdcdcd3f4 100644
--- a/heat_integrationtests/functional/test_stack_tags.py
+++ b/heat_integrationtests/functional/test_stack_tags.py
@@ -27,7 +27,7 @@ description:
def test_stack_tag(self):
# Stack create with stack tags
- tags = ['foo', 'bar']
+ tags = 'foo,bar'
stack_identifier = self.stack_create(
template=self.template,
tags=tags
@@ -35,10 +35,10 @@ description:
# Ensure property tag is populated and matches given tags
stack = self.client.stacks.get(stack_identifier)
- self.assertEqual(tags, stack.tags)
+ self.assertEqual(['foo', 'bar'], stack.tags)
# Update tags
- updated_tags = ['tag1', 'tag2']
+ updated_tags = 'tag1,tag2'
self.update_stack(
stack_identifier,
template=self.template,
@@ -46,7 +46,7 @@ description:
# Ensure property tag is populated and matches updated tags
updated_stack = self.client.stacks.get(stack_identifier)
- self.assertEqual(updated_tags, updated_stack.tags)
+ self.assertEqual(['tag1', 'tag2'], updated_stack.tags)
# Delete tags
self.update_stack(
@@ -60,7 +60,7 @@ description:
def test_hidden_stack(self):
# Stack create with hidden stack tag
- tags = ['foo', 'hidden']
+ tags = 'foo,hidden'
self.stack_create(
template=self.template,
tags=tags)
diff --git a/heat_integrationtests/functional/test_validation.py b/heat_integrationtests/functional/test_validation.py
index 6a65091bf..1111ea819 100644
--- a/heat_integrationtests/functional/test_validation.py
+++ b/heat_integrationtests/functional/test_validation.py
@@ -38,6 +38,8 @@ parameters:
type: string
image:
type: string
+ network:
+ type: string
resources:
config:
type: My::Config
@@ -50,7 +52,9 @@ resources:
image: {get_param: image}
flavor: {get_param: flavor}
key_name: {get_param: keyname}
+ networks: [{network: {get_param: network} }]
user_data_format: SOFTWARE_CONFIG
+
'''
config_template = '''
heat_template_version: 2014-10-16
@@ -74,7 +78,8 @@ resources:
{'My::Config': 'provider.yaml'}}
parameters = {'keyname': self.keypair_name,
'flavor': self.conf.minimal_instance_type,
- 'image': self.conf.minimal_image_ref}
+ 'image': self.conf.minimal_image_ref,
+ 'network': self.conf.fixed_network_name}
# Note we don't wait for CREATE_COMPLETE, because we're using a
# minimal image without the tools to apply the config.
# The point of the test is just to prove that validation won't
diff --git a/heat_integrationtests/post_test_hook.sh b/heat_integrationtests/post_test_hook.sh
index 7919d38cf..34b6ae28c 100755
--- a/heat_integrationtests/post_test_hook.sh
+++ b/heat_integrationtests/post_test_hook.sh
@@ -17,8 +17,8 @@
set -x
export DEST=${DEST:-/opt/stack/new}
-source $DEST/devstack/accrc/admin/admin
+source $DEST/devstack/openrc admin admin
sudo -E $DEST/heat/heat_integrationtests/prepare_test_env.sh
sudo -E $DEST/heat/heat_integrationtests/prepare_test_network.sh
-source $DEST/devstack/accrc/demo/demo
+source $DEST/devstack/openrc
sudo -E tox -eintegration
diff --git a/heat_integrationtests/pre_test_hook.sh b/heat_integrationtests/pre_test_hook.sh
index 37550f29a..ef5472eaa 100755
--- a/heat_integrationtests/pre_test_hook.sh
+++ b/heat_integrationtests/pre_test_hook.sh
@@ -24,7 +24,11 @@ echo -e 'notification_driver=messagingv2\n' >> $localconf
echo -e 'num_engine_workers=2\n' >> $localconf
echo -e 'plugin_dirs=$HEAT_DIR/heat_integrationtests/common/test_resources\n' >> $localconf
echo -e 'hidden_stack_tags=hidden\n' >> $localconf
+echo -e '[heat_api]\nworkers=1\n' >> $localconf
+echo -e '[heat_api_cfn]\nworkers=1\n' >> $localconf
+echo -e '[heat_api_cloudwatch]\nworkers=1' >> $localconf
if [ "$ENABLE_CONVERGENCE" == "true" ] ; then
echo -e 'convergence_engine=true\n' >> $localconf
-fi \ No newline at end of file
+fi
+
diff --git a/heat_integrationtests/prepare_test_network.sh b/heat_integrationtests/prepare_test_network.sh
index 657ce7575..41f42bab5 100755
--- a/heat_integrationtests/prepare_test_network.sh
+++ b/heat_integrationtests/prepare_test_network.sh
@@ -16,12 +16,12 @@
set -x
-source $DEST/devstack/accrc/admin/admin
+source $DEST/devstack/openrc admin admin
PUB_SUBNET_ID=`neutron subnet-list | grep ' public-subnet ' | awk '{split($0,a,"|"); print a[2]}'`
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id="${PUB_SUBNET_ID//[[:space:]]/}" '$4 == subnet_id { print $8; }'`
# create a heat specific private network (default 'private' network has ipv6 subnet)
-source $DEST/devstack/accrc/demo/demo
+source $DEST/devstack/openrc
HEAT_PRIVATE_SUBNET_CIDR=10.0.5.0/24
neutron net-create heat-net
neutron subnet-create --name heat-subnet heat-net $HEAT_PRIVATE_SUBNET_CIDR
diff --git a/heat_integrationtests/requirements.txt b/heat_integrationtests/requirements.txt
index 0249c9b29..da8673bff 100644
--- a/heat_integrationtests/requirements.txt
+++ b/heat_integrationtests/requirements.txt
@@ -5,6 +5,7 @@ pbr<2.0,>=0.11
kombu>=3.0.7
oslo.log>=1.2.0 # Apache-2.0
oslo.messaging!=1.12.0,>=1.8.0 # Apache-2.0
+oslo.concurrency>=2.1.0
oslo.config>=1.11.0 # Apache-2.0
oslo.utils>=1.6.0 # Apache-2.0
paramiko>=1.13.0
diff --git a/heat_upgradetests/post_test_hook.sh b/heat_upgradetests/post_test_hook.sh
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/heat_upgradetests/post_test_hook.sh
diff --git a/heat_upgradetests/pre_test_hook.sh b/heat_upgradetests/pre_test_hook.sh
new file mode 100755
index 000000000..e69de29bb
--- /dev/null
+++ b/heat_upgradetests/pre_test_hook.sh
diff --git a/requirements.txt b/requirements.txt
index 63c4913ba..99a99cfe4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,7 @@
pbr<2.0,>=0.11
Babel>=1.3
+cryptography>=0.8.2 # Apache-2.0
dogpile.cache>=0.5.3
eventlet>=0.17.4
greenlet>=0.3.2
@@ -14,19 +15,20 @@ kombu>=3.0.7
lxml>=2.3
netaddr>=0.7.12
oslo.config>=1.11.0 # Apache-2.0
-oslo.concurrency>=2.0.0 # Apache-2.0
+oslo.concurrency>=2.1.0 # Apache-2.0
oslo.context>=0.2.0 # Apache-2.0
-oslo.db>=1.10.0 # Apache-2.0
+oslo.db>=1.12.0 # Apache-2.0
oslo.i18n>=1.5.0 # Apache-2.0
oslo.log>=1.2.0 # Apache-2.0
oslo.messaging!=1.12.0,>=1.8.0 # Apache-2.0
oslo.middleware!=2.0.0,>=1.2.0 # Apache-2.0
oslo.policy>=0.5.0 # Apache-2.0
+oslo.reports>=0.1.0 # Apache-2.0
oslo.serialization>=1.4.0 # Apache-2.0
oslo.service>=0.1.0 # Apache-2.0
oslo.utils>=1.6.0 # Apache-2.0
osprofiler>=0.3.0 # Apache-2.0
-oslo.versionedobjects>=0.3.0
+oslo.versionedobjects!=0.5.0,>=0.3.0
PasteDeploy>=1.5.0
posix-ipc
pycrypto>=2.6
@@ -47,7 +49,7 @@ python-zaqarclient>=0.1.1
PyYAML>=3.1.0
qpid-python;python_version=='2.7'
requests>=2.5.2
-Routes!=2.0,>=1.12.3
+Routes!=2.0,!=2.1,>=1.12.3
six>=1.9.0
SQLAlchemy<1.1.0,>=0.9.7
sqlalchemy-migrate>=0.9.6
diff --git a/setup.cfg b/setup.cfg
index c51ee2e21..47de27b12 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -50,6 +50,7 @@ heat.clients =
barbican = heat.engine.clients.os.barbican:BarbicanClientPlugin
ceilometer = heat.engine.clients.os.ceilometer:CeilometerClientPlugin
cinder = heat.engine.clients.os.cinder:CinderClientPlugin
+ designate = heat.engine.clients.os.designate:DesignateClientPlugin
glance = heat.engine.clients.os.glance:GlanceClientPlugin
heat = heat.engine.clients.os.heat_plugin:HeatClientPlugin
keystone = heat.engine.clients.os.keystone:KeystoneClientPlugin
@@ -90,6 +91,7 @@ heat.constraints =
manila.share_snapshot = heat.engine.clients.os.manila:ManilaShareSnapshotConstraint
manila.share_network = heat.engine.clients.os.manila:ManilaShareNetworkConstraint
manila.share_type = heat.engine.clients.os.manila:ManilaShareTypeConstraint
+ designate.domain = heat.engine.clients.os.designate:DesignateDomainConstraint
heat.stack_lifecycle_plugins =
diff --git a/tox.ini b/tox.ini
index 1305101b4..d1da7d992 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,7 +7,6 @@ skipsdist = True
# Note the hash seed is set to 0 until heat can be tested with a
# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
- OS_TEST_DBAPI_ADMIN_CONNECTION=mysql+pymysql://openstack_citest:openstack_citest@localhost/;postgresql://openstack_citest:openstack_citest@localhost/postgres;sqlite://
PYTHONHASHSEED=0
usedevelop = True
install_command = pip install {opts} {packages}