-rw-r--r--  HACKING.rst | 24
-rw-r--r--  README.rst | 11
-rw-r--r--  api-ref/source/v1/parameters.yaml | 6
-rw-r--r--  api-ref/source/v1/stacks.inc | 7
-rw-r--r--  doc/source/contributing/blueprints.rst | 69
-rw-r--r--  doc/source/developing_guides/pluginguide.rst | 10
-rw-r--r--  doc/source/man/heat-db-setup.rst | 5
-rw-r--r--  doc/source/man/heat-keystone-setup-domain.rst | 4
-rw-r--r--  doc/source/man/heat-keystone-setup.rst | 5
-rw-r--r--  doc/source/man/heat-manage.rst | 4
-rw-r--r--  heat/api/middleware/fault.py | 1
-rw-r--r--  heat/api/openstack/v1/stacks.py | 25
-rw-r--r--  heat/common/exception.py | 4
-rw-r--r--  heat/engine/clients/os/blazar.py | 42
-rw-r--r--  heat/engine/clients/os/swift.py | 38
-rw-r--r--  heat/engine/clients/os/zun.py | 69
-rw-r--r--  heat/engine/clients/progress.py | 34
-rw-r--r--  heat/engine/constraint/common_constraints.py | 24
-rw-r--r--  heat/engine/resources/openstack/magnum/bay.py | 13
-rw-r--r--  heat/engine/resources/openstack/magnum/baymodel.py | 15
-rw-r--r--  heat/engine/resources/openstack/nova/floatingip.py | 22
-rw-r--r--  heat/engine/resources/openstack/zun/container.py | 173
-rw-r--r--  heat/engine/service.py | 186
-rw-r--r--  heat/engine/stack.py | 107
-rw-r--r--  heat/engine/template_files.py | 19
-rw-r--r--  heat/engine/worker.py | 8
-rw-r--r--  heat/rpc/client.py | 45
-rw-r--r--  heat/tests/api/cfn/test_api_cfn_v1.py | 21
-rw-r--r--  heat/tests/api/openstack_v1/test_stacks.py | 66
-rw-r--r--  heat/tests/clients/test_blazar_client.py | 42
-rw-r--r--  heat/tests/clients/test_zun_client.py | 30
-rw-r--r--  heat/tests/constraints/test_common_constraints.py | 28
-rw-r--r--  heat/tests/convergence/framework/worker_wrapper.py | 3
-rw-r--r--  heat/tests/engine/service/test_service_engine.py | 11
-rw-r--r--  heat/tests/engine/service/test_stack_action.py | 8
-rw-r--r--  heat/tests/engine/service/test_stack_create.py | 92
-rw-r--r--  heat/tests/engine/service/test_stack_events.py | 5
-rw-r--r--  heat/tests/engine/service/test_stack_update.py | 61
-rw-r--r--  heat/tests/engine/test_engine_worker.py | 2
-rw-r--r--  heat/tests/engine/test_resource_type.py | 6
-rw-r--r--  heat/tests/openstack/manila/test_share_network.py | 9
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_port.py | 6
-rw-r--r--  heat/tests/openstack/neutron/test_neutron_security_group_rule.py | 2
-rw-r--r--  heat/tests/openstack/zun/test_container.py | 195
-rw-r--r--  heat/tests/test_convg_stack.py | 32
-rw-r--r--  heat/tests/test_engine_service.py | 9
-rw-r--r--  heat/tests/test_rpc_client.py | 5
-rw-r--r--  lower-constraints.txt | 7
-rw-r--r--  releasenotes/notes/add-blazar-client-plugin-e2077e8646ca5f1a.yaml | 4
-rw-r--r--  releasenotes/notes/add-networks-properties-to-container-resource-b1936d7e113aafbc.yaml | 7
-rw-r--r--  releasenotes/notes/convergence-delete-race-5b821bbd4c5ba5dc.yaml | 11
-rw-r--r--  releasenotes/notes/hidden-resources-529934d5eb519c86.yaml | 10
-rw-r--r--  releasenotes/notes/legacy-client-races-ba7a60cef5ec1694.yaml | 12
-rw-r--r--  releasenotes/notes/migrate-to-storyboard-36c90857d3167322.yaml | 10
-rw-r--r--  releasenotes/notes/support-files-from-swift-f27fd2d788d67580.yaml | 6
-rw-r--r--  requirements.txt | 4
-rw-r--r--  setup.cfg | 1
-rw-r--r--  test-requirements.txt | 4
-rw-r--r--  tools/dashboards/heat.dash | 7
59 files changed, 1365 insertions, 321 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 9b0cadee5..618cfa309 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -24,22 +24,24 @@ infrastructure in OpenStack Heat, please read heat/tests/testing-overview.txt.
Running tests
-------------
-The testing system is based on a combination of tox and testr. The canonical
-approach to running tests is to simply run the command `tox`. This will
+The testing system is based on a combination of tox and stestr. The canonical
+approach to running tests is to simply run the command ``tox``. This will
create virtual environments, populate them with dependencies and run all of
the tests that OpenStack CI systems run. Behind the scenes, tox is running
-`testr run --parallel`, but is set up such that you can supply any additional
-testr arguments that are needed to tox. For example, you can run:
-`tox -- --analyze-isolation` to cause tox to tell testr to add
---analyze-isolation to its argument list.
+``stestr run``, but is set up such that you can supply any additional
+stestr arguments that are needed to tox. For example, you can run:
+``tox -- --analyze-isolation`` to cause tox to tell stestr to add
+``--analyze-isolation`` to its argument list.
It is also possible to run the tests inside of a virtual environment
you have created, or it is possible that you have all of the dependencies
-installed locally already. In this case, you can interact with the testr
-command directly. Running `testr run` will run the entire test suite. `testr
-run --parallel` will run it in parallel (this is the default incantation tox
-uses.) More information about testr can be found at:
-http://wiki.openstack.org/testr
+installed locally already. In this case, you can interact with the ``stestr``
+command directly. Running ``stestr run`` will run the entire test suite in
+as many threads as you have CPU cores (this is the default incantation tox
+uses); the number of threads can be adjusted with the ``--concurrency N``
+argument. ``stestr run --serial`` will run the tests serially instead.
+More information about stestr can be found at:
+http://stestr.readthedocs.io
Note that unit tests use a database if available. See
``tools/test-setup.sh`` on how to set up the databases the same way as
diff --git a/README.rst b/README.rst
index 0264a6f50..9eb9fd3d8 100644
--- a/README.rst
+++ b/README.rst
@@ -35,6 +35,16 @@ Python client
* Documentation: https://docs.openstack.org/python-heatclient/latest
* Source: https://git.openstack.org/cgit/openstack/python-heatclient
+Report a Story (a bug/blueprint)
+--------------------------------
+
+If you'd like to report a Story (what we used to call a bug or blueprint),
+you can do so under "Report a story" in
+`Heat's StoryBoard <https://storyboard.openstack.org/#!/project/989>`_.
+If you need to report a story against another heat sub-project, you can find
+them all in the
+`Heat StoryBoard Group <https://storyboard.openstack.org/#!/project_group/82>`_.
+
References
----------
* https://docs.amazonwebservices.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html
@@ -62,3 +72,4 @@ We have integration with
* https://git.openstack.org/cgit/openstack/python-zaqarclient (messaging service)
* https://git.openstack.org/cgit/openstack/python-monascaclient (monitoring service)
* https://git.openstack.org/cgit/openstack/python-zunclient (container management service)
+* https://git.openstack.org/cgit/openstack/python-blazarclient (reservation service)
diff --git a/api-ref/source/v1/parameters.yaml b/api-ref/source/v1/parameters.yaml
index 7c657efe7..d61866794 100644
--- a/api-ref/source/v1/parameters.yaml
+++ b/api-ref/source/v1/parameters.yaml
@@ -787,6 +787,12 @@ files:
in: body
required: false
type: object
+files_container:
+ description: |
+ Name of the container in swift that has child templates and environment files.
+ in: body
+ required: false
+ type: string
function_description:
description: |
The textual description about the usage of the template function.
diff --git a/api-ref/source/v1/stacks.inc b/api-ref/source/v1/stacks.inc
index bfca766d5..8cf5b7889 100644
--- a/api-ref/source/v1/stacks.inc
+++ b/api-ref/source/v1/stacks.inc
@@ -34,6 +34,7 @@ Request Parameters
- environment: environment
- environment_files: environment_files
- files: files
+ - files_container: files_container
- parameters: parameters
- stack_name: stack_name_request
- tags: tags
@@ -94,6 +95,8 @@ Request Parameters
- tenant_id: tenant_id
- disable_rollback: disable_rollback
- environment: environment
+ - environment_files: environment_files
+ - files_container: files_container
- files: files
- parameters: parameters
- stack_name: stack_name_request
@@ -368,6 +371,7 @@ Request Parameters
- disable_rollback: disable_rollback
- environment: environment
- environment_files: environment_files
+ - files_container: files_container
- existing: existing
- files: files
- parameters: parameters
@@ -435,6 +439,7 @@ Request Parameters
- disable_rollback: disable_rollback
- environment: environment
- environment_files: environment_files
+ - files_container: files_container
- existing: existing_patch
- files: files
- parameters: parameters
@@ -500,6 +505,7 @@ Request Parameters
- disable_rollback: disable_rollback
- environment: environment
- environment_files: environment_files
+ - files_container: files_container
- existing: existing
- files: files
- parameters: parameters
@@ -571,6 +577,7 @@ Request Parameters
- disable_rollback: disable_rollback
- environment: environment
- environment_files: environment_files
+ - files_container: files_container
- existing: existing_patch
- files: files
- parameters: parameters
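
The new ``files_container`` parameter can be passed in any of the request
bodies above. A minimal sketch of a stack-create call using it, assuming a
reachable Heat endpoint and a valid token (the endpoint, token and container
name here are placeholders, not part of this change)::

    import json
    import requests

    HEAT_ENDPOINT = 'http://heat.example.com:8004/v1/PROJECT_ID'  # placeholder
    TOKEN = 'gAAAAA...'                                           # placeholder

    body = {
        'stack_name': 'demo',
        'template': {'heat_template_version': '2018-08-31', 'resources': {}},
        'parameters': {},
        # New in this change: child templates and environment files are
        # fetched from this swift container instead of being inlined in
        # the 'files' map.
        'files_container': 'my-templates-container',
        'environment_files': ['env.yaml'],
    }

    resp = requests.post('%s/stacks' % HEAT_ENDPOINT,
                         headers={'X-Auth-Token': TOKEN,
                                  'Content-Type': 'application/json'},
                         data=json.dumps(body))
    print(resp.status_code, resp.json())
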
diff --git a/doc/source/contributing/blueprints.rst b/doc/source/contributing/blueprints.rst
index a860438b8..087178f89 100644
--- a/doc/source/contributing/blueprints.rst
+++ b/doc/source/contributing/blueprints.rst
@@ -11,9 +11,14 @@ Please note that we use a template for spec submissions. Please use the
<https://git.openstack.org/cgit/openstack/heat-specs/tree/specs/templates>`_.
It is not required to fill out all sections in the template.
+You also have to create a Story in `heat's StoryBoard
+<https://storyboard.openstack.org/#!/project/989>`_ and create tasks that
+fit the plan to implement this spec (each task links to a patch in gerrit).
+
Spec Notes
----------
+
There are occasions when a spec is approved and the code does not land in
the cycle it was targeted for. For these cases, the workflow to get the spec
into the next release is as below:
@@ -29,67 +34,51 @@ priorities.
Heat Spec Lite
--------------
-Lite specs are small feature requests tracked as Launchpad bugs, with status
-'Wishlist' and tagged with 'spec-lite' tag. These allow for submission and
-review of these feature requests before code is submitted.
+Lite specs are small feature requests tracked as StoryBoard stories and tagged
+with the 'spec-lite' and 'priority-wishlist' tags. These allow for submission
+and review of these feature requests before code is submitted.
These can be used for small features that don’t warrant a detailed spec to be
proposed, evaluated, and worked on. The team evaluates these requests as it
evaluates specs.
-Once a `spec-lite` bug has been approved/triaged as a
+Once a `spec-lite` story has been approved/triaged as a
Request for Enhancement(RFE), it’ll be targeted for a release.
-The workflow for the life of a spec-lite in Launchpad is as follows:
+The workflow for the life of a spec-lite in StoryBoard is as follows:
-* File a bug with a small summary of what the requested change is and
- tag it as `spec-lite`.
-* The bug is triaged and importance changed to `Wishlist`.
-* The bug is evaluated and marked as `Triaged` to announce approval or
- to `Won't fix` to announce rejection or `Invalid` to request a full
- spec.
-* The bug is moved to `In Progress` once the code is up and ready to
+* File a story with a small summary of what the requested change is and
+ tag it as `spec-lite` and `priority-wishlist`.
+* Create tasks in the story that fit your plan.
+* The story is evaluated and tagged `triaged` to announce approval, or
+  marked `Invalid` to request a full spec or to reject an invalid request.
+* The task is moved to `Progress` once the code is up and ready to
review.
-* The bug is moved to `Fix Committed` once the patch lands.
-
-In summary:
+* The task is moved to `Merged` once the patch lands.
+* The story is moved to `Merged` once all of its tasks have merged.
-+--------------+-----------------------------------------------------------------------------+
-|State | Meaning |
-+==============+=============================================================================+
-|New | This is where spec-lite starts, as filed by the community. |
-+--------------+-----------------------------------------------------------------------------+
-|Triaged | Drivers - Move to this state to mean, "you can start working on it" |
-+--------------+-----------------------------------------------------------------------------+
-|Won't Fix | Drivers - Move to this state to reject a lite-spec. |
-+--------------+-----------------------------------------------------------------------------+
-|Invalid | Drivers - Move to this state to request a full spec for this request |
-+--------------+-----------------------------------------------------------------------------+
+The drivers team will discuss the following story reports in IRC meetings:
-The drivers team will discuss the following bug reports in IRC meetings:
-
-* `heat RFE's <https://bugs.launchpad.net/heat/+bugs?field.status%3Alist=NEW&field.tag=spec-lite>`_
-* `python-heatclient RFE's <https://bugs.launchpad.net/python-heatclient/+bugs?field.status%3Alist=NEW&field.tag=spec-lite>`_
+* `heat stories <https://storyboard.openstack.org/#!/project_group/82>`_
+* `heat story filter <https://storyboard.openstack.org/#!/board/71>`_
Lite spec Submission Guidelines
-------------------------------
-When a bug is submitted, there are two fields that must be filled: ‘summary’
-and ‘further information’. The ‘summary’ must be brief enough to fit in one
-line.
+When a story is submitted, there is one field that must be filled: ‘Description’.
-The ‘further information’ section must be a description of what you would like
+The ‘Description’ section must be a description of what you would like
to see implemented in heat. The description should provide enough details for
a knowledgeable developer to understand what is the existing problem and
what’s the proposed solution.
-Add `spec-lite` tag to the bug.
+Add `spec-lite` tag to the story.
-Lite spec from existing bugs
-----------------------------
+Lite spec from existing stories
+-------------------------------
-If there's an already existing bug that describes a small feature suitable for
-a spec-lite, add a `spec-lite` tag to the bug. There is no need to create a new
-bug. The comments and history of the existing bug are important for it's review.
+If there's an already existing story that describes a small feature suitable for
+a spec-lite, add a `spec-lite` tag to the story. There is no need to create a new
+story. The comments and history of the existing story are important for its review.
diff --git a/doc/source/developing_guides/pluginguide.rst b/doc/source/developing_guides/pluginguide.rst
index 3d1c3630d..eb69449ac 100644
--- a/doc/source/developing_guides/pluginguide.rst
+++ b/doc/source/developing_guides/pluginguide.rst
@@ -488,6 +488,16 @@ that updates require the engine to delete and re-create the resource
have values of ``None``
:type prop_diff: collections.Mapping
+ *Note* Before calling ``handle_update`` we check whether the resource
+ needs to be replaced, especially for a resource in a ``*_FAILED`` state.
+ There is a default implementation of ``needs_replace_failed`` in
+ ``heat.engine.resource.Resource`` that simply returns ``True``, indicating
+ that updates require replacement. We override that implementation for
+ ``OS::Nova::Server``, ``OS::Cinder::Volume`` and all neutron resources.
+ The basic principle is to check whether the underlying resource exists
+ and whether its real status is available. Override
+ ``needs_replace_failed`` in your resource plug-ins if needed.
+
.. py:function:: check_update_complete(self, token)
If defined, will be called with the return value of ``handle_update``
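
As a rough illustration of the note above, a plug-in might override
``needs_replace_failed`` along these lines (the ``things.get`` call and the
``'ERROR'`` status are hypothetical, standing in for whatever the real client
exposes)::

    def needs_replace_failed(self):
        """Only force replacement when the physical resource is unusable."""
        if not self.resource_id:
            # Nothing was ever created, so a replacement is required.
            return True
        try:
            thing = self.client().things.get(self.resource_id)  # hypothetical
        except Exception:
            # The underlying resource no longer exists; replace it.
            return True
        # Keep the existing resource unless its real status is an error.
        return thing.status == 'ERROR'
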
diff --git a/doc/source/man/heat-db-setup.rst b/doc/source/man/heat-db-setup.rst
index 905698a2d..e04388cff 100644
--- a/doc/source/man/heat-db-setup.rst
+++ b/doc/source/man/heat-db-setup.rst
@@ -67,5 +67,6 @@ EXAMPLES
BUGS
====
-Heat bugs are managed through Launchpad
-`OpenStack Heat Bugs <https://bugs.launchpad.net/heat>`__
+
+Heat bugs are managed through StoryBoard
+`OpenStack Heat Stories <https://storyboard.openstack.org/#!/project/989>`__
diff --git a/doc/source/man/heat-keystone-setup-domain.rst b/doc/source/man/heat-keystone-setup-domain.rst
index 5b194c863..5fc42ed5c 100644
--- a/doc/source/man/heat-keystone-setup-domain.rst
+++ b/doc/source/man/heat-keystone-setup-domain.rst
@@ -103,5 +103,5 @@ EXAMPLES
BUGS
====
-Heat bugs are managed through Launchpad
-`OpenStack Heat Bugs <https://bugs.launchpad.net/heat>`__
+Heat bugs are managed through StoryBoard
+`OpenStack Heat Stories <https://storyboard.openstack.org/#!/project/989>`__
diff --git a/doc/source/man/heat-keystone-setup.rst b/doc/source/man/heat-keystone-setup.rst
index a9afc2325..1b866b427 100644
--- a/doc/source/man/heat-keystone-setup.rst
+++ b/doc/source/man/heat-keystone-setup.rst
@@ -30,5 +30,6 @@ EXAMPLES
BUGS
====
-Heat bugs are managed through Launchpad
-`OpenStack Heat Bugs <https://bugs.launchpad.net/heat>`__
+
+Heat bugs are managed through StoryBoard
+`OpenStack Heat Stories <https://storyboard.openstack.org/#!/project/989>`__
diff --git a/doc/source/man/heat-manage.rst b/doc/source/man/heat-manage.rst
index e9ad40a7d..d29f11ab2 100644
--- a/doc/source/man/heat-manage.rst
+++ b/doc/source/man/heat-manage.rst
@@ -73,5 +73,5 @@ connection and logging.
BUGS
====
-* Heat issues are tracked in Launchpad so you can view or report bugs here
- `OpenStack Heat Bugs <https://bugs.launchpad.net/heat>`__
+Heat bugs are managed through StoryBoard
+`OpenStack Heat Stories <https://storyboard.openstack.org/#!/project/989>`__
diff --git a/heat/api/middleware/fault.py b/heat/api/middleware/fault.py
index 2e977393a..a190ff84d 100644
--- a/heat/api/middleware/fault.py
+++ b/heat/api/middleware/fault.py
@@ -82,6 +82,7 @@ class FaultWrapper(wsgi.Middleware):
'MissingCredentialError': webob.exc.HTTPBadRequest,
'UserParameterMissing': webob.exc.HTTPBadRequest,
'RequestLimitExceeded': webob.exc.HTTPBadRequest,
+ 'DownloadLimitExceeded': webob.exc.HTTPBadRequest,
'Invalid': webob.exc.HTTPBadRequest,
'ResourcePropertyConflict': webob.exc.HTTPBadRequest,
'PropertyUnspecifiedError': webob.exc.HTTPBadRequest,
diff --git a/heat/api/openstack/v1/stacks.py b/heat/api/openstack/v1/stacks.py
index cbbbded38..d144ea197 100644
--- a/heat/api/openstack/v1/stacks.py
+++ b/heat/api/openstack/v1/stacks.py
@@ -50,6 +50,7 @@ class InstantiationData(object):
PARAM_ENVIRONMENT,
PARAM_FILES,
PARAM_ENVIRONMENT_FILES,
+ PARAM_FILES_CONTAINER
) = (
'stack_name',
'template',
@@ -58,6 +59,7 @@ class InstantiationData(object):
'environment',
'files',
'environment_files',
+ 'files_container'
)
def __init__(self, data, patch=False):
@@ -157,6 +159,9 @@ class InstantiationData(object):
def environment_files(self):
return self.data.get(self.PARAM_ENVIRONMENT_FILES, None)
+ def files_container(self):
+ return self.data.get(self.PARAM_FILES_CONTAINER, None)
+
def args(self):
"""Get any additional arguments supplied by the user."""
params = self.data.items()
@@ -369,8 +374,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files()
- )
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
formatted_stack = stacks_view.format_stack(req, result)
return {'stack': formatted_stack}
@@ -403,7 +408,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files())
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
formatted_stack = stacks_view.format_stack(
req,
@@ -486,7 +492,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files())
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
raise exc.HTTPAccepted()
@@ -507,7 +514,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files())
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
raise exc.HTTPAccepted()
@@ -535,7 +543,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files())
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
return {'resource_changes': changes}
@@ -555,7 +564,8 @@ class StackController(object):
data.environment(),
data.files(),
args,
- environment_files=data.environment_files())
+ environment_files=data.environment_files(),
+ files_container=data.files_container())
return {'resource_changes': changes}
@@ -616,6 +626,7 @@ class StackController(object):
data.environment(),
files=data.files(),
environment_files=data.environment_files(),
+ files_container=data.files_container(),
show_nested=show_nested,
ignorable_errors=ignorable_errors)
diff --git a/heat/common/exception.py b/heat/common/exception.py
index 02d817b08..a60981339 100644
--- a/heat/common/exception.py
+++ b/heat/common/exception.py
@@ -478,6 +478,10 @@ class RequestLimitExceeded(HeatException):
msg_fmt = _('Request limit exceeded: %(message)s')
+class DownloadLimitExceeded(HeatException):
+ msg_fmt = _('Permissible download limit exceeded: %(message)s')
+
+
class StackResourceLimitExceeded(HeatException):
msg_fmt = _('Maximum resources per stack exceeded.')
diff --git a/heat/engine/clients/os/blazar.py b/heat/engine/clients/os/blazar.py
new file mode 100644
index 000000000..3095d4651
--- /dev/null
+++ b/heat/engine/clients/os/blazar.py
@@ -0,0 +1,42 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from blazarclient import client as blazar_client
+from keystoneauth1.exceptions import http as ks_exc
+
+from heat.engine.clients import client_plugin
+
+CLIENT_NAME = 'blazar'
+
+
+class BlazarClientPlugin(client_plugin.ClientPlugin):
+
+ service_types = [RESERVATION] = ['reservation']
+
+ def _create(self, version=None):
+ interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
+ args = {
+ 'session': self.context.keystone_session,
+ 'service_type': self.RESERVATION,
+ 'interface': interface,
+ 'region_name': self._get_region_name(),
+ }
+
+ client = blazar_client.Client(**args)
+ return client
+
+ def is_not_found(self, exc):
+ return isinstance(exc, ks_exc.NotFound)
+
+ def has_host(self):
+ return bool(self.client().host.list())
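
A hedged sketch of how a resource handler could use the new plugin, assuming
it is registered under the name ``blazar`` (the resource class and error
message below are illustrative only)::

    from heat.common import exception
    from heat.engine import resource


    class ReservationResource(resource.Resource):

        def validate(self):
            super(ReservationResource, self).validate()
            # has_host() is True only if blazar reports at least one host.
            if not self.client_plugin('blazar').has_host():
                raise exception.StackValidationFailed(
                    message='No blazar host is available for reservation.')
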
diff --git a/heat/engine/clients/os/swift.py b/heat/engine/clients/os/swift.py
index edddaeb18..1edf378f0 100644
--- a/heat/engine/clients/os/swift.py
+++ b/heat/engine/clients/os/swift.py
@@ -18,12 +18,15 @@ import logging
import random
import time
+from oslo_config import cfg
import six
from six.moves.urllib import parse
from swiftclient import client as sc
from swiftclient import exceptions
from swiftclient import utils as swiftclient_utils
+from heat.common import exception
+from heat.common.i18n import _
from heat.engine.clients import client_plugin
IN_PROGRESS = 'in progress'
@@ -137,3 +140,38 @@ class SwiftClientPlugin(client_plugin.ClientPlugin):
# according to RFC 2616, all HTTP time headers must be
# in GMT time, so create an offset-naive UTC datetime
return datetime.datetime(*pd)
+
+ def get_files_from_container(self, files_container, files_to_skip=None):
+ """Gets the file contents from a container.
+
+ Get the file contents from the container in a files map. A list
+ of files to skip can also be specified and those would not be
+ downloaded from swift.
+ """
+ client = self.client()
+ files = {}
+
+ if files_to_skip is None:
+ files_to_skip = []
+
+ try:
+ headers, objects = client.get_container(files_container)
+ bytes_used = int(headers.get('x-container-bytes-used', 0))
+ if bytes_used > cfg.CONF.max_json_body_size:
+ msg = _("Total size of files to download (%(size)s bytes) "
+ "exceeds maximum allowed (%(limit)s bytes).") % {
+ 'size': bytes_used,
+ 'limit': cfg.CONF.max_json_body_size}
+ raise exception.DownloadLimitExceeded(message=msg)
+ for obj in objects:
+ file_name = obj['name']
+ if file_name not in files_to_skip:
+ contents = client.get_object(files_container, file_name)[1]
+ files[file_name] = contents
+ except exceptions.ClientException as cex:
+ raise exception.NotFound(_('Could not fetch files from '
+ 'container %(container)s, '
+ 'reason: %(reason)s.') %
+ {'container': files_container,
+ 'reason': six.text_type(cex)})
+ return files
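
A minimal sketch of driving the new helper directly, assuming a request
context ``cnxt`` and a container named ``my-templates``; files already
supplied inline are skipped so they are not downloaded twice::

    inline_files = {'env.yaml': '...'}   # assumed to come from the request
    swift_plugin = cnxt.clients.client_plugin('swift')

    fetched = swift_plugin.get_files_from_container(
        'my-templates', files_to_skip=list(inline_files))
    # Inline content takes precedence over the downloaded objects.
    fetched.update(inline_files)
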
diff --git a/heat/engine/clients/os/zun.py b/heat/engine/clients/os/zun.py
index af829275c..51c466662 100644
--- a/heat/engine/clients/os/zun.py
+++ b/heat/engine/clients/os/zun.py
@@ -11,6 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_config import cfg
+import tenacity
from zunclient import client as zun_client
from zunclient import exceptions as zc_exc
@@ -26,14 +28,15 @@ class ZunClientPlugin(client_plugin.ClientPlugin):
default_version = '1.12'
supported_versions = [
- V1_12
+ V1_12, V1_18
] = [
- '1.12'
+ '1.12', '1.18'
]
def _create(self, version=None):
if not version:
version = self.default_version
+
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
args = {
'interface': interface,
@@ -45,6 +48,68 @@ class ZunClientPlugin(client_plugin.ClientPlugin):
client = zun_client.Client(version, **args)
return client
+ def update_container(self, container_id, **prop_diff):
+ try:
+ if prop_diff:
+ self.client(version=self.V1_18).containers.update(
+ container_id, **prop_diff)
+ except zc_exc.NotAcceptable:
+ if 'name' in prop_diff:
+ name = prop_diff.pop('name')
+ self.client().containers.rename(container_id, name=name)
+ if prop_diff:
+ self.client().containers.update(container_id, **prop_diff)
+
+ def network_detach(self, container_id, port_id):
+ with self.ignore_not_found:
+ self.client(version=self.V1_18).containers.network_detach(
+ container_id, port=port_id)
+ return True
+
+ def network_attach(self, container_id, port_id=None, net_id=None, fip=None,
+ security_groups=None):
+ with self.ignore_not_found:
+ kwargs = {}
+ if port_id:
+ kwargs['port'] = port_id
+ if net_id:
+ kwargs['network'] = net_id
+ if fip:
+ kwargs['fixed_ip'] = fip
+ self.client(version=self.V1_18).containers.network_attach(
+ container_id, **kwargs)
+ return True
+
+ @tenacity.retry(
+ stop=tenacity.stop_after_attempt(
+ cfg.CONF.max_interface_check_attempts),
+ wait=tenacity.wait_exponential(multiplier=0.5, max=12.0),
+ retry=tenacity.retry_if_result(client_plugin.retry_if_result_is_false))
+ def check_network_detach(self, container_id, port_id):
+ with self.ignore_not_found:
+ interfaces = self.client(
+ version=self.V1_18).containers.network_list(container_id)
+ for iface in interfaces:
+ if iface.port_id == port_id:
+ return False
+ return True
+
+ @tenacity.retry(
+ stop=tenacity.stop_after_attempt(
+ cfg.CONF.max_interface_check_attempts),
+ wait=tenacity.wait_exponential(multiplier=0.5, max=12.0),
+ retry=tenacity.retry_if_result(client_plugin.retry_if_result_is_false))
+ def check_network_attach(self, container_id, port_id):
+ if not port_id:
+ return True
+
+ interfaces = self.client(version=self.V1_18).containers.network_list(
+ container_id)
+ for iface in interfaces:
+ if iface.port_id == port_id:
+ return True
+ return False
+
def is_not_found(self, ex):
return isinstance(ex, zc_exc.NotFound)
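
The two ``check_network_*`` helpers are tenacity-wrapped pollers: they re-read
the container's network list with exponential back-off until the port appears
(or disappears) or ``max_interface_check_attempts`` is exhausted. A hedged
sketch of attaching a port and waiting for it, assuming a context ``cnxt`` and
existing container and port UUIDs::

    zun = cnxt.clients.client_plugin('zun')

    if zun.network_attach(container_id, port_id=port_id):
        # Blocks (retrying) until the port shows up in the container's
        # network list, or raises once the retry budget is spent.
        zun.check_network_attach(container_id, port_id)
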
diff --git a/heat/engine/clients/progress.py b/heat/engine/clients/progress.py
index cbac31a82..c699d9548 100644
--- a/heat/engine/clients/progress.py
+++ b/heat/engine/clients/progress.py
@@ -27,18 +27,18 @@ class ServerCreateProgress(object):
self.server_id = server_id
-class ServerUpdateProgress(ServerCreateProgress):
+class UpdateProgressBase(object):
"""Keeps track on particular server update task.
``handler`` is a method of client plugin performing
required update operation.
- Its first positional argument must be ``server_id``
+ Its first positional argument must be ``resource_id``
and this method must be resilent to intermittent failures,
returning ``True`` if API was successfully called, ``False`` otherwise.
If result of API call is asynchronous, client plugin must have
corresponding ``check_<handler>`` method.
- Its first positional argument must be ``server_id``
+ Its first positional argument must be ``resource_id``
and it must return ``True`` or ``False`` indicating completeness
of the update operation.
@@ -52,28 +52,46 @@ class ServerUpdateProgress(ServerCreateProgress):
structure and contain parameters with which corresponding ``handler`` and
``check_<handler>`` methods of client plugin must be called.
- ``args`` is automatically prepended with ``server_id``.
+ ``args`` is automatically prepended with ``resource_id``.
Missing ``args`` or ``kwargs`` are interpreted
as empty tuple/dict respectively.
Defaults are interpreted as both ``args`` and ``kwargs`` being empty.
"""
- def __init__(self, server_id, handler, complete=False, called=False,
+ def __init__(self, resource_id, handler, complete=False, called=False,
handler_extra=None, checker_extra=None):
- super(ServerUpdateProgress, self).__init__(server_id, complete)
+ self.complete = complete
self.called = called
self.handler = handler
self.checker = 'check_%s' % handler
# set call arguments basing on incomplete values and defaults
hargs = handler_extra or {}
- self.handler_args = (server_id,) + (hargs.get('args') or ())
+ self.handler_args = (resource_id,) + (hargs.get('args') or ())
self.handler_kwargs = hargs.get('kwargs') or {}
cargs = checker_extra or {}
- self.checker_args = (server_id,) + (cargs.get('args') or ())
+ self.checker_args = (resource_id,) + (cargs.get('args') or ())
self.checker_kwargs = cargs.get('kwargs') or {}
+class ServerUpdateProgress(UpdateProgressBase):
+ def __init__(self, server_id, handler, complete=False, called=False,
+ handler_extra=None, checker_extra=None):
+ super(ServerUpdateProgress, self).__init__(
+ server_id, handler, complete=complete, called=called,
+ handler_extra=handler_extra, checker_extra=checker_extra)
+ self.server_id = server_id
+
+
+class ContainerUpdateProgress(UpdateProgressBase):
+ def __init__(self, container_id, handler, complete=False, called=False,
+ handler_extra=None, checker_extra=None):
+ super(ContainerUpdateProgress, self).__init__(
+ container_id, handler, complete=complete, called=called,
+ handler_extra=handler_extra, checker_extra=checker_extra)
+ self.container_id = container_id
+
+
class ServerDeleteProgress(object):
def __init__(self, server_id, image_id=None, image_complete=True):
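
The ``ContainerUpdateProgress`` objects follow the handler/checker contract
documented above; a small sketch of how one is built and what the automatic
``resource_id`` prepending produces (the container and port ids are
placeholders)::

    from heat.engine.clients import progress

    container_id = 'CONTAINER_UUID'   # placeholder
    port_id = 'PORT_UUID'             # placeholder

    prg = progress.ContainerUpdateProgress(
        container_id, 'network_attach',
        handler_extra={'kwargs': {'port_id': port_id}},
        checker_extra={'args': (port_id,)})

    # The resource id is prepended to both argument tuples automatically.
    assert prg.handler_args == (container_id,)
    assert prg.checker_args == (container_id, port_id)
    assert prg.checker == 'check_network_attach'
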
diff --git a/heat/engine/constraint/common_constraints.py b/heat/engine/constraint/common_constraints.py
index 3d3637587..124f09eb9 100644
--- a/heat/engine/constraint/common_constraints.py
+++ b/heat/engine/constraint/common_constraints.py
@@ -17,7 +17,7 @@ import netaddr
import pytz
import six
-from oslo_utils import netutils
+from neutron_lib.api import validators
from oslo_utils import timeutils
from heat.common.i18n import _
@@ -35,7 +35,12 @@ class IPConstraint(constraints.BaseCustomConstraint):
def validate(self, value, context, template=None):
self._error_message = 'Invalid IP address'
- return netutils.is_valid_ip(value)
+ if not isinstance(value, six.string_types):
+ return False
+ msg = validators.validate_ip_address(value)
+ if msg is not None:
+ return False
+ return True
class MACConstraint(constraints.BaseCustomConstraint):
@@ -100,17 +105,14 @@ class DNSDomainConstraint(DNSNameConstraint):
class CIDRConstraint(constraints.BaseCustomConstraint):
- def _validate_whitespace(self, data):
- self._error_message = ("Invalid net cidr '%s' contains "
- "whitespace" % data)
- if len(data.split()) > 1:
- return False
- return True
-
def validate(self, value, context, template=None):
try:
- netaddr.IPNetwork(value)
- return self._validate_whitespace(value)
+ netaddr.IPNetwork(value, implicit_prefix=True)
+ msg = validators.validate_subnet(value)
+ if msg is not None:
+ self._error_message = msg
+ return False
+ return True
except Exception as ex:
self._error_message = 'Invalid net cidr %s ' % six.text_type(ex)
return False
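
A small illustration of the stricter behaviour after switching to neutron-lib's
validators; non-string values and whitespace-laden CIDRs are rejected (shown
here as a rough, standalone sketch)::

    from heat.engine.constraint import common_constraints as cc

    ip = cc.IPConstraint()
    print(ip.validate('10.0.0.1', None))            # True
    print(ip.validate(10, None))                    # False: not a string

    cidr = cc.CIDRConstraint()
    print(cidr.validate('10.0.0.0/24', None))       # True
    print(cidr.validate('10.0.0.0/24 foo', None))   # False: invalid subnet
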
diff --git a/heat/engine/resources/openstack/magnum/bay.py b/heat/engine/resources/openstack/magnum/bay.py
index e7e3faefb..fd2ed643c 100644
--- a/heat/engine/resources/openstack/magnum/bay.py
+++ b/heat/engine/resources/openstack/magnum/bay.py
@@ -29,12 +29,17 @@ class Bay(resource.Resource):
deprecation_msg = _('Please use OS::Magnum::Cluster instead.')
support_status = support.SupportStatus(
- status=support.DEPRECATED,
+ status=support.HIDDEN,
message=deprecation_msg,
- version='9.0.0',
+ version='11.0.0',
previous_status=support.SupportStatus(
- status=support.SUPPORTED,
- version='6.0.0')
+ status=support.DEPRECATED,
+ message=deprecation_msg,
+ version='9.0.0',
+ previous_status=support.SupportStatus(
+ status=support.SUPPORTED,
+ version='6.0.0')
+ )
)
PROPERTIES = (
diff --git a/heat/engine/resources/openstack/magnum/baymodel.py b/heat/engine/resources/openstack/magnum/baymodel.py
index 751f43517..156907c7d 100644
--- a/heat/engine/resources/openstack/magnum/baymodel.py
+++ b/heat/engine/resources/openstack/magnum/baymodel.py
@@ -28,13 +28,18 @@ class BayModel(cluster_template.ClusterTemplate):
deprecate_msg = _('Please use OS::Magnum::ClusterTemplate instead.')
support_status = support.SupportStatus(
- status=support.DEPRECATED,
+ status=support.HIDDEN,
message=deprecate_msg,
- version='9.0.0',
+ version='11.0.0',
previous_status=support.SupportStatus(
- status=support.SUPPORTED,
- version='5.0.0'),
- substitute_class=cluster_template.ClusterTemplate
+ status=support.DEPRECATED,
+ message=deprecate_msg,
+ version='9.0.0',
+ previous_status=support.SupportStatus(
+ status=support.SUPPORTED,
+ version='5.0.0'),
+ substitute_class=cluster_template.ClusterTemplate
+ )
)
def translation_rules(self, props):
diff --git a/heat/engine/resources/openstack/nova/floatingip.py b/heat/engine/resources/openstack/nova/floatingip.py
index 3a2c1d288..83e5e7fca 100644
--- a/heat/engine/resources/openstack/nova/floatingip.py
+++ b/heat/engine/resources/openstack/nova/floatingip.py
@@ -38,10 +38,15 @@ class NovaFloatingIp(resource.Resource):
deprecation_msg = _('Please use OS::Neutron::FloatingIP instead.')
support_status = support.SupportStatus(
- status=support.DEPRECATED,
+ status=support.HIDDEN,
message=deprecation_msg,
- version='9.0.0',
- previous_status=support.SupportStatus(version='2014.1')
+ version='11.0.0',
+ previous_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ message=deprecation_msg,
+ version='9.0.0',
+ previous_status=support.SupportStatus(version='2014.1')
+ )
)
required_service_extension = 'os-floating-ips'
@@ -132,10 +137,15 @@ class NovaFloatingIpAssociation(resource.Resource):
deprecation_msg = _(
'Please use OS::Neutron::FloatingIPAssociation instead.')
support_status = support.SupportStatus(
- status=support.DEPRECATED,
+ status=support.HIDDEN,
message=deprecation_msg,
- version='9.0.0',
- previous_status=support.SupportStatus(version='2014.1')
+ version='11.0.0',
+ previous_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ message=deprecation_msg,
+ version='9.0.0',
+ previous_status=support.SupportStatus(version='2014.1')
+ )
)
PROPERTIES = (
diff --git a/heat/engine/resources/openstack/zun/container.py b/heat/engine/resources/openstack/zun/container.py
index 56a312c24..8a2628720 100644
--- a/heat/engine/resources/openstack/zun/container.py
+++ b/heat/engine/resources/openstack/zun/container.py
@@ -16,13 +16,17 @@ import copy
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
+from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
+from heat.engine.resources.openstack.nova import server_network_mixin
from heat.engine import support
+from heat.engine import translation
-class Container(resource.Resource):
+class Container(resource.Resource,
+ server_network_mixin.ServerNetworkMixin):
"""A resource that creates a Zun Container.
This resource creates a Zun container.
@@ -34,14 +38,27 @@ class Container(resource.Resource):
NAME, IMAGE, COMMAND, CPU, MEMORY,
ENVIRONMENT, WORKDIR, LABELS, IMAGE_PULL_POLICY,
RESTART_POLICY, INTERACTIVE, IMAGE_DRIVER, HINTS,
- HOSTNAME, SECURITY_GROUPS, MOUNTS,
+ HOSTNAME, SECURITY_GROUPS, MOUNTS, NETWORKS,
) = (
'name', 'image', 'command', 'cpu', 'memory',
'environment', 'workdir', 'labels', 'image_pull_policy',
'restart_policy', 'interactive', 'image_driver', 'hints',
- 'hostname', 'security_groups', 'mounts',
+ 'hostname', 'security_groups', 'mounts', 'networks',
)
+ _NETWORK_KEYS = (
+ NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
+ NETWORK_SUBNET, NETWORK_PORT_EXTRA, NETWORK_FLOATING_IP,
+ ALLOCATE_NETWORK, NIC_TAG,
+ ) = (
+ 'uuid', 'network', 'fixed_ip', 'port',
+ 'subnet', 'port_extra_properties', 'floating_ip',
+ 'allocate_network', 'tag',
+ )
+
+ _IFACE_MANAGED_KEYS = (NETWORK_PORT, NETWORK_ID,
+ NETWORK_FIXED_IP, NETWORK_SUBNET)
+
_MOUNT_KEYS = (
VOLUME_ID, MOUNT_PATH, VOLUME_SIZE
) = (
@@ -160,6 +177,41 @@ class Container(resource.Resource):
},
)
),
+ NETWORKS: properties.Schema(
+ properties.Schema.LIST,
+ _('An ordered list of nics to be added to this container, with '
+ 'information about connected networks, fixed ips, port etc.'),
+ support_status=support.SupportStatus(version='11.0.0'),
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ NETWORK_ID: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or ID of network to create a port on.'),
+ constraints=[
+ constraints.CustomConstraint('neutron.network')
+ ]
+ ),
+ NETWORK_FIXED_IP: properties.Schema(
+ properties.Schema.STRING,
+ _('Fixed IP address to specify for the port '
+ 'created on the requested network.'),
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ]
+ ),
+ NETWORK_PORT: properties.Schema(
+ properties.Schema.STRING,
+ _('ID of an existing port to associate with this '
+ 'container.'),
+ constraints=[
+ constraints.CustomConstraint('neutron.port')
+ ]
+ ),
+ },
+ ),
+ update_allowed=True,
+ ),
}
attributes_schema = {
@@ -182,6 +234,24 @@ class Container(resource.Resource):
entity = 'containers'
+ def translation_rules(self, props):
+ rules = [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ translation_path=[self.NETWORKS, self.NETWORK_ID],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='network'),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ translation_path=[self.NETWORKS, self.NETWORK_PORT],
+ client_plugin=self.client_plugin('neutron'),
+ finder='find_resourceid_by_name_or_id',
+ entity='port')]
+ return rules
+
def validate(self):
super(Container, self).validate()
@@ -196,6 +266,10 @@ class Container(resource.Resource):
for mount in mounts:
self._validate_mount(mount)
+ networks = self.properties[self.NETWORKS] or []
+ for network in networks:
+ self._validate_network(network)
+
def _validate_mount(self, mount):
volume_id = mount.get(self.VOLUME_ID)
volume_size = mount.get(self.VOLUME_SIZE)
@@ -215,6 +289,21 @@ class Container(resource.Resource):
"/".join([self.NETWORKS, self.VOLUME_ID]),
"/".join([self.NETWORKS, self.VOLUME_SIZE]))
+ def _validate_network(self, network):
+ net_id = network.get(self.NETWORK_ID)
+ port = network.get(self.NETWORK_PORT)
+ fixed_ip = network.get(self.NETWORK_FIXED_IP)
+
+ if net_id is None and port is None:
+ raise exception.PropertyUnspecifiedError(
+ self.NETWORK_ID, self.NETWORK_PORT)
+
+ # Don't allow specify ip and port at the same time
+ if fixed_ip and port is not None:
+ raise exception.ResourcePropertyConflict(
+ ".".join([self.NETWORKS, self.NETWORK_FIXED_IP]),
+ ".".join([self.NETWORKS, self.NETWORK_PORT]))
+
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
@@ -224,6 +313,9 @@ class Container(resource.Resource):
mounts = args.pop(self.MOUNTS, None)
if mounts:
args[self.MOUNTS] = self._build_mounts(mounts)
+ networks = args.pop(self.NETWORKS, None)
+ if networks:
+ args['nets'] = self._build_nets(networks)
container = self.client().containers.run(**args)
self.resource_id_set(container.uuid)
return container.uuid
@@ -252,6 +344,18 @@ class Container(resource.Resource):
mnts.append(mnt_info)
return mnts
+ def _build_nets(self, networks):
+ nics = self._build_nics(networks)
+ for nic in nics:
+ net_id = nic.pop('net-id', None)
+ if net_id:
+ nic[self.NETWORK_ID] = net_id
+ port_id = nic.pop('port-id', None)
+ if port_id:
+ nic[self.NETWORK_PORT] = port_id
+
+ return nics
+
def check_create_complete(self, id):
container = self.client().containers.get(id)
if container.status in ('Creating', 'Created'):
@@ -279,11 +383,64 @@ class Container(resource.Resource):
.status)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- if self.NAME in prop_diff:
- name = prop_diff.pop(self.NAME)
- self.client().containers.rename(self.resource_id, name=name)
- if prop_diff:
- self.client().containers.update(self.resource_id, **prop_diff)
+ updaters = []
+ container = None
+
+ after_props = json_snippet.properties(self.properties_schema,
+ self.context)
+ if self.NETWORKS in prop_diff:
+ prop_diff.pop(self.NETWORKS)
+ container = self.client().containers.get(self.resource_id)
+ updaters.extend(self._update_networks(container, after_props))
+
+ self.client_plugin().update_container(self.resource_id, **prop_diff)
+
+ return updaters
+
+ def _update_networks(self, container, after_props):
+ updaters = []
+ new_networks = after_props[self.NETWORKS]
+ old_networks = self.properties[self.NETWORKS]
+ security_groups = after_props[self.SECURITY_GROUPS]
+
+ interfaces = self.client(version=self.client_plugin().V1_18).\
+ containers.network_list(self.resource_id)
+ remove_ports, add_nets = self.calculate_networks(
+ old_networks, new_networks, interfaces, security_groups)
+
+ for port in remove_ports:
+ updaters.append(
+ progress.ContainerUpdateProgress(
+ self.resource_id, 'network_detach',
+ handler_extra={'args': (port,)},
+ checker_extra={'args': (port,)})
+ )
+
+ for args in add_nets:
+ updaters.append(
+ progress.ContainerUpdateProgress(
+ self.resource_id, 'network_attach',
+ handler_extra={'kwargs': args},
+ checker_extra={'args': (args['port_id'],)})
+ )
+
+ return updaters
+
+ def check_update_complete(self, updaters):
+ """Push all updaters to completion in list order."""
+ for prg in updaters:
+ if not prg.called:
+ handler = getattr(self.client_plugin(), prg.handler)
+ prg.called = handler(*prg.handler_args,
+ **prg.handler_kwargs)
+ return False
+ if not prg.complete:
+ check_complete = getattr(self.client_plugin(), prg.checker)
+ prg.complete = check_complete(*prg.checker_args,
+ **prg.checker_kwargs)
+ break
+ status = all(prg.complete for prg in updaters)
+ return status
def handle_delete(self):
if not self.resource_id:
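
For reference, the validation rules for the new ``networks`` property can be
summarised with the kind of property maps a template would produce (a hedged
illustration, not code from this change)::

    # Accepted: a network name/ID, or an existing port, is enough.
    ok_network = {'network': 'private'}
    ok_port = {'port': 'PORT_UUID'}

    # Rejected by _validate_network():
    neither = {}                           # raises PropertyUnspecifiedError
    both = {'port': 'PORT_UUID',
            'fixed_ip': '10.0.0.5'}        # raises ResourcePropertyConflict
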
diff --git a/heat/engine/service.py b/heat/engine/service.py
index 5156ed165..cf9528bf8 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -56,6 +56,7 @@ from heat.engine import stack_lock
from heat.engine import stk_defn
from heat.engine import support
from heat.engine import template as templatem
+from heat.engine import template_files
from heat.engine import update
from heat.engine import worker
from heat.objects import event as event_object
@@ -184,6 +185,11 @@ class ThreadGroupManager(object):
stack.ROLLBACK,
stack.UPDATE)):
stack.persist_state_and_release_lock(lock.engine_id)
+
+ notify = kwargs.get('notify')
+ if notify is not None:
+ assert not notify.signalled()
+ notify.signal()
else:
lock.release()
@@ -243,6 +249,38 @@ class ThreadGroupManager(object):
msg_queue.put_nowait(message)
+class NotifyEvent(object):
+ def __init__(self):
+ self._queue = eventlet.queue.LightQueue(1)
+ self._signalled = False
+
+ def signalled(self):
+ return self._signalled
+
+ def signal(self):
+ """Signal the event."""
+ if self._signalled:
+ return
+ self._signalled = True
+
+ self._queue.put(None)
+ # Yield control so that the waiting greenthread will get the message
+ # as soon as possible, so that the API handler can respond to the user.
+ # Another option would be to set the queue length to 0 (which would
+ # cause put() to block until the event has been seen), but many unit
+ # tests run in a single greenthread and would thus deadlock.
+ eventlet.sleep(0)
+
+ def wait(self):
+ """Wait for the event."""
+ try:
+ # There's no timeout argument to eventlet.event.Event available
+ # until eventlet 0.22.1, so use a queue.
+ self._queue.get(timeout=cfg.CONF.rpc_response_timeout)
+ except eventlet.queue.Empty:
+ LOG.warning('Timed out waiting for operation to start')
+
+
@profiler.trace_cls("rpc")
class EngineListener(object):
"""Listen on an AMQP queue named for the engine.
@@ -306,7 +344,7 @@ class EngineService(service.ServiceBase):
by the RPC caller.
"""
- RPC_API_VERSION = '1.35'
+ RPC_API_VERSION = '1.36'
def __init__(self, host, topic):
resources.initialise()
@@ -344,14 +382,13 @@ class EngineService(service.ServiceBase):
LOG.debug("Starting listener for engine %s", self.engine_id)
self.listener.start()
- if cfg.CONF.convergence_engine:
- self.worker_service = worker.WorkerService(
- host=self.host,
- topic=rpc_worker_api.TOPIC,
- engine_id=self.engine_id,
- thread_group_mgr=self.thread_group_mgr
- )
- self.worker_service.start()
+ self.worker_service = worker.WorkerService(
+ host=self.host,
+ topic=rpc_worker_api.TOPIC,
+ engine_id=self.engine_id,
+ thread_group_mgr=self.thread_group_mgr
+ )
+ self.worker_service.start()
target = messaging.Target(
version=self.RPC_API_VERSION, server=self.host,
@@ -400,8 +437,7 @@ class EngineService(service.ServiceBase):
if self.listener:
self.listener.stop()
- if cfg.CONF.convergence_engine and self.worker_service:
- # Stop the WorkerService
+ if self.worker_service:
self.worker_service.stop()
# Wait for all active threads to be finished
@@ -656,8 +692,9 @@ class EngineService(service.ServiceBase):
def _parse_template_and_validate_stack(self, cnxt, stack_name, template,
params, files, environment_files,
- args, owner_id=None,
- nested_depth=0, user_creds_id=None,
+ files_container, args,
+ owner_id=None, nested_depth=0,
+ user_creds_id=None,
stack_user_project_id=None,
convergence=False,
parent_resource_name=None,
@@ -680,9 +717,12 @@ class EngineService(service.ServiceBase):
if template_id is not None:
tmpl = templatem.Template.load(cnxt, template_id)
else:
+ if files_container:
+ files = template_files.get_files_from_container(
+ cnxt, files_container, files)
tmpl = templatem.Template(template, files=files)
- env_util.merge_environments(environment_files, files, params,
- tmpl.all_param_schemata(files))
+ env_util.merge_environments(environment_files, files,
+ params, tmpl.all_param_schemata(files))
tmpl.env = environment.Environment(params)
self._validate_new_stack(cnxt, stack_name, tmpl)
@@ -706,7 +746,7 @@ class EngineService(service.ServiceBase):
@context.request_context
def preview_stack(self, cnxt, stack_name, template, params, files,
- args, environment_files=None):
+ args, environment_files=None, files_container=None):
"""Simulate a new stack using the provided template.
Note that at this stage the template has already been fetched from the
@@ -721,6 +761,7 @@ class EngineService(service.ServiceBase):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
"""
LOG.info('previewing stack %s', stack_name)
@@ -732,6 +773,7 @@ class EngineService(service.ServiceBase):
params,
files,
environment_files,
+ files_container,
args,
convergence=conv_eng)
@@ -740,7 +782,8 @@ class EngineService(service.ServiceBase):
@context.request_context
def create_stack(self, cnxt, stack_name, template, params, files,
args, environment_files=None,
- owner_id=None, nested_depth=0, user_creds_id=None,
+ files_container=None, owner_id=None,
+ nested_depth=0, user_creds_id=None,
stack_user_project_id=None, parent_resource_name=None,
template_id=None):
"""Create a new stack using the template provided.
@@ -757,6 +800,7 @@ class EngineService(service.ServiceBase):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
:param owner_id: parent stack ID for nested stacks, only expected when
called from another heat-engine (not a user option)
:param nested_depth: the nested depth for nested stacks, only expected
@@ -788,9 +832,9 @@ class EngineService(service.ServiceBase):
stack = self._parse_template_and_validate_stack(
cnxt, stack_name, template, params, files, environment_files,
- args, owner_id, nested_depth, user_creds_id,
- stack_user_project_id, convergence, parent_resource_name,
- template_id)
+ files_container, args, owner_id, nested_depth,
+ user_creds_id, stack_user_project_id, convergence,
+ parent_resource_name, template_id)
stack_id = stack.store()
if cfg.CONF.reauthentication_auth_method == 'trusts':
@@ -817,7 +861,8 @@ class EngineService(service.ServiceBase):
def _prepare_stack_updates(self, cnxt, current_stack,
template, params, environment_files,
- files, args, template_id=None):
+ files, files_container,
+ args, template_id=None):
"""Return the current and updated stack for a given transition.
Changes *will not* be persisted, this is a helper method for
@@ -866,10 +911,13 @@ class EngineService(service.ServiceBase):
raise exception.NotSupported(feature=msg)
new_files = current_stack.t.files
+ if files_container:
+ files = template_files.get_files_from_container(
+ cnxt, files_container, files)
new_files.update(files or {})
tmpl = templatem.Template(new_template, files=new_files)
- env_util.merge_environments(environment_files, files, params,
- tmpl.all_param_schemata(files))
+ env_util.merge_environments(environment_files, new_files,
+ params, tmpl.all_param_schemata(files))
existing_env = current_stack.env.env_as_dict()
existing_params = existing_env[env_fmt.PARAMETERS]
clear_params = set(args.get(rpc_api.PARAM_CLEAR_PARAMETERS, []))
@@ -888,8 +936,12 @@ class EngineService(service.ServiceBase):
if template_id is not None:
tmpl = templatem.Template.load(cnxt, template_id)
else:
+ if files_container:
+ files = template_files.get_files_from_container(
+ cnxt, files_container, files)
tmpl = templatem.Template(template, files=files)
- env_util.merge_environments(environment_files, files, params,
+ env_util.merge_environments(environment_files,
+ files, params,
tmpl.all_param_schemata(files))
tmpl.env = environment.Environment(params)
@@ -932,7 +984,8 @@ class EngineService(service.ServiceBase):
@context.request_context
def update_stack(self, cnxt, stack_identity, template, params,
- files, args, environment_files=None, template_id=None):
+ files, args, environment_files=None,
+ files_container=None, template_id=None):
"""Update an existing stack based on the provided template and params.
Note that at this stage the template has already been fetched from the
@@ -947,6 +1000,7 @@ class EngineService(service.ServiceBase):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
:param template_id: the ID of a pre-stored template in the DB
"""
# Get the database representation of the existing stack
@@ -970,7 +1024,8 @@ class EngineService(service.ServiceBase):
tmpl, current_stack, updated_stack = self._prepare_stack_updates(
cnxt, current_stack, template, params,
- environment_files, files, args, template_id)
+ environment_files, files, files_container,
+ args, template_id)
if current_stack.convergence:
current_stack.thread_group_mgr = self.thread_group_mgr
@@ -978,19 +1033,23 @@ class EngineService(service.ServiceBase):
new_stack=updated_stack)
else:
msg_queue = eventlet.queue.LightQueue()
+ stored_event = NotifyEvent()
th = self.thread_group_mgr.start_with_lock(cnxt, current_stack,
self.engine_id,
current_stack.update,
updated_stack,
- msg_queue=msg_queue)
+ msg_queue=msg_queue,
+ notify=stored_event)
th.link(self.thread_group_mgr.remove_msg_queue,
current_stack.id, msg_queue)
self.thread_group_mgr.add_msg_queue(current_stack.id, msg_queue)
+ stored_event.wait()
return dict(current_stack.identifier())
@context.request_context
def preview_update_stack(self, cnxt, stack_identity, template, params,
- files, args, environment_files=None):
+ files, args, environment_files=None,
+ files_container=None):
"""Shows the resources that would be updated.
The preview_update_stack method shows the resources that would be
@@ -1013,7 +1072,7 @@ class EngineService(service.ServiceBase):
tmpl, current_stack, updated_stack = self._prepare_stack_updates(
cnxt, current_stack, template, params,
- environment_files, files, args)
+ environment_files, files, files_container, args)
update_task = update.StackUpdate(current_stack, updated_stack, None)
@@ -1172,8 +1231,8 @@ class EngineService(service.ServiceBase):
@context.request_context
def validate_template(self, cnxt, template, params=None, files=None,
- environment_files=None, show_nested=False,
- ignorable_errors=None):
+ environment_files=None, files_container=None,
+ show_nested=False, ignorable_errors=None):
"""Check the validity of a template.
Checks, so far as we can, that a template is valid, and returns
@@ -1187,6 +1246,7 @@ class EngineService(service.ServiceBase):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
:param show_nested: if True, any nested templates will be checked
:param ignorable_errors: List of error_code to be ignored as part of
validation
@@ -1203,10 +1263,12 @@ class EngineService(service.ServiceBase):
msg = (_("Invalid codes in ignore_errors : %s") %
list(invalid_codes))
return webob.exc.HTTPBadRequest(explanation=msg)
-
+ if files_container:
+ files = template_files.get_files_from_container(
+ cnxt, files_container, files)
tmpl = templatem.Template(template, files=files)
- env_util.merge_environments(environment_files, files, params,
- tmpl.all_param_schemata(files))
+ env_util.merge_environments(environment_files, files,
+ params, tmpl.all_param_schemata(files))
tmpl.env = environment.Environment(params)
try:
self._validate_template(cnxt, tmpl)
@@ -1349,16 +1411,20 @@ class EngineService(service.ServiceBase):
stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
- if stack.convergence and cfg.CONF.convergence_engine:
- def convergence_delete():
- stack.thread_group_mgr = self.thread_group_mgr
+ if stack.convergence:
+ stack.thread_group_mgr = self.thread_group_mgr
+ template = templatem.Template.create_empty_template(
+ from_template=stack.t)
+
+ # stop existing traversal; mark stack as FAILED
+ if stack.status == stack.IN_PROGRESS:
+ self.worker_service.stop_traversal(stack)
+
+ def stop_workers():
self.worker_service.stop_all_workers(stack)
- stack.delete_all_snapshots()
- template = templatem.Template.create_empty_template(
- from_template=stack.t)
- stack.converge_stack(template=template, action=stack.DELETE)
- self.thread_group_mgr.start(stack.id, convergence_delete)
+ stack.converge_stack(template=template, action=stack.DELETE,
+ pre_converge=stop_workers)
return
lock = stack_lock.StackLock(cnxt, stack.id, self.engine_id)
@@ -1367,8 +1433,11 @@ class EngineService(service.ServiceBase):
# Successfully acquired lock
if acquire_result is None:
self.thread_group_mgr.stop_timers(stack.id)
+ stored = NotifyEvent()
self.thread_group_mgr.start_with_acquired_lock(stack, lock,
- stack.delete)
+ stack.delete,
+ notify=stored)
+ stored.wait()
return
# Current engine has the lock
@@ -1973,30 +2042,28 @@ class EngineService(service.ServiceBase):
@context.request_context
def stack_suspend(self, cnxt, stack_identity):
"""Handle request to perform suspend action on a stack."""
- def _stack_suspend(stack):
- LOG.debug("suspending stack %s", stack.name)
- stack.suspend()
-
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
+ stored_event = NotifyEvent()
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
- _stack_suspend, stack)
+ stack.suspend,
+ notify=stored_event)
+ stored_event.wait()
@context.request_context
def stack_resume(self, cnxt, stack_identity):
"""Handle request to perform a resume action on a stack."""
- def _stack_resume(stack):
- LOG.debug("resuming stack %s", stack.name)
- stack.resume()
-
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
+ stored_event = NotifyEvent()
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
- _stack_resume, stack)
+ stack.resume,
+ notify=stored_event)
+ stored_event.wait()
@context.request_context
def stack_snapshot(self, cnxt, stack_identity, name):
@@ -2068,15 +2135,13 @@ class EngineService(service.ServiceBase):
stack = parser.Stack.load(cnxt, stack=s)
LOG.info("Checking stack %s", stack.name)
+ stored_event = NotifyEvent()
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
- stack.check)
+ stack.check, notify=stored_event)
+ stored_event.wait()
@context.request_context
def stack_restore(self, cnxt, stack_identity, snapshot_id):
- def _stack_restore(stack, snapshot):
- LOG.debug("restoring stack %s", stack.name)
- stack.restore(snapshot)
-
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
self.resource_enforcer.enforce_stack(stack, is_registered_policy=True)
@@ -2092,8 +2157,11 @@ class EngineService(service.ServiceBase):
action=stack.RESTORE,
new_stack=new_stack)
else:
+ stored_event = NotifyEvent()
self.thread_group_mgr.start_with_lock(
- cnxt, stack, self.engine_id, _stack_restore, stack, snapshot)
+ cnxt, stack, self.engine_id, stack.restore, snapshot,
+ notify=stored_event)
+ stored_event.wait()
@context.request_context
def stack_list_snapshots(self, cnxt, stack_identity):
diff --git a/heat/engine/stack.py b/heat/engine/stack.py
index a08d1e04c..66eba3cbe 100644
--- a/heat/engine/stack.py
+++ b/heat/engine/stack.py
@@ -302,16 +302,18 @@ class Stack(collections.Mapping):
return {n: self.defn.output_definition(n)
for n in self.defn.enabled_output_names()}
+ def _resources_for_defn(self, stack_defn):
+ return {
+ name: resource.Resource(name,
+ stack_defn.resource_definition(name),
+ self)
+ for name in stack_defn.enabled_rsrc_names()
+ }
+
@property
def resources(self):
if self._resources is None:
- self._resources = {
- name: resource.Resource(name,
- self.defn.resource_definition(name),
- self)
- for name in self.defn.enabled_rsrc_names()
- }
-
+ self._resources = self._resources_for_defn(self.defn)
return self._resources
def _update_all_resource_data(self, for_resources, for_outputs):
@@ -1120,7 +1122,8 @@ class Stack(collections.Mapping):
@scheduler.wrappertask
def stack_task(self, action, reverse=False, post_func=None,
- aggregate_exceptions=False, pre_completion_func=None):
+ aggregate_exceptions=False, pre_completion_func=None,
+ notify=None):
"""A task to perform an action on the stack.
All of the resources are traversed in forward or reverse dependency
@@ -1145,9 +1148,13 @@ class Stack(collections.Mapping):
'Failed stack pre-ops: %s' % six.text_type(e))
if callable(post_func):
post_func()
+ # No need to call notify.signal(), because persistence of the
+ # state is always deferred here.
return
self.state_set(action, self.IN_PROGRESS,
'Stack %s started' % action)
+ if notify is not None:
+ notify.signal()
stack_status = self.COMPLETE
reason = 'Stack %s completed successfully' % action
@@ -1206,12 +1213,13 @@ class Stack(collections.Mapping):
@profiler.trace('Stack.check', hide_args=False)
@reset_state_on_error
- def check(self):
+ def check(self, notify=None):
self.updated_time = oslo_timeutils.utcnow()
checker = scheduler.TaskRunner(
self.stack_task, self.CHECK,
post_func=self.supports_check_action,
- aggregate_exceptions=True)
+ aggregate_exceptions=True,
+ notify=notify)
checker()
def supports_check_action(self):
@@ -1279,7 +1287,7 @@ class Stack(collections.Mapping):
@profiler.trace('Stack.update', hide_args=False)
@reset_state_on_error
- def update(self, newstack, msg_queue=None):
+ def update(self, newstack, msg_queue=None, notify=None):
"""Update the stack.
Compare the current stack with newstack,
@@ -1294,11 +1302,12 @@ class Stack(collections.Mapping):
"""
self.updated_time = oslo_timeutils.utcnow()
updater = scheduler.TaskRunner(self.update_task, newstack,
- msg_queue=msg_queue)
+ msg_queue=msg_queue, notify=notify)
updater()
@profiler.trace('Stack.converge_stack', hide_args=False)
- def converge_stack(self, template, action=UPDATE, new_stack=None):
+ def converge_stack(self, template, action=UPDATE, new_stack=None,
+ pre_converge=None):
"""Update the stack template and trigger convergence for resources."""
if action not in [self.CREATE, self.ADOPT]:
# no back-up template for create action
@@ -1352,9 +1361,10 @@ class Stack(collections.Mapping):
# TODO(later): lifecycle_plugin_utils.do_pre_ops
- self.thread_group_mgr.start(self.id, self._converge_create_or_update)
+ self.thread_group_mgr.start(self.id, self._converge_create_or_update,
+ pre_converge=pre_converge)
- def _converge_create_or_update(self):
+ def _converge_create_or_update(self, pre_converge=None):
current_resources = self._update_or_store_resources()
self._compute_convg_dependencies(self.ext_rsrcs_db, self.dependencies,
current_resources)
@@ -1371,6 +1381,16 @@ class Stack(collections.Mapping):
'action': self.action})
return
+ if callable(pre_converge):
+ pre_converge()
+ if self.action == self.DELETE:
+ try:
+ self.delete_all_snapshots()
+ except Exception as exc:
+ self.state_set(self.action, self.FAILED, six.text_type(exc))
+ self.purge_db()
+ return
+
LOG.debug('Starting traversal %s with dependencies: %s',
self.current_traversal, self.convergence_dependencies)
@@ -1526,11 +1546,14 @@ class Stack(collections.Mapping):
self.state_set(self.action, self.FAILED, six.text_type(reason))
@scheduler.wrappertask
- def update_task(self, newstack, action=UPDATE, msg_queue=None):
+ def update_task(self, newstack, action=UPDATE,
+ msg_queue=None, notify=None):
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
LOG.error("Unexpected action %s passed to update!", action)
self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action)
+ if notify is not None:
+ notify.signal()
return
try:
@@ -1539,6 +1562,8 @@ class Stack(collections.Mapping):
except Exception as e:
self.state_set(action, self.FAILED, e.args[0] if e.args else
'Failed stack pre-ops: %s' % six.text_type(e))
+ if notify is not None:
+ notify.signal()
return
if self.status == self.IN_PROGRESS:
if action == self.ROLLBACK:
@@ -1547,6 +1572,8 @@ class Stack(collections.Mapping):
reason = _('Attempted to %s an IN_PROGRESS '
'stack') % action
self.reset_stack_and_resources_in_progress(reason)
+ if notify is not None:
+ notify.signal()
return
# Save a copy of the new template. To avoid two DB writes
@@ -1560,6 +1587,10 @@ class Stack(collections.Mapping):
self.status_reason = 'Stack %s started' % action
self._send_notification_and_add_event()
self.store()
+ # Notify the caller that the state is stored
+ if notify is not None:
+ notify.signal()
+
if prev_tmpl_id is not None:
raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)
@@ -1828,7 +1859,7 @@ class Stack(collections.Mapping):
@profiler.trace('Stack.delete', hide_args=False)
@reset_state_on_error
- def delete(self, action=DELETE, backup=False, abandon=False):
+ def delete(self, action=DELETE, backup=False, abandon=False, notify=None):
"""Delete all of the resources, and then the stack itself.
The action parameter is used to differentiate between a user
@@ -1844,12 +1875,16 @@ class Stack(collections.Mapping):
LOG.error("Unexpected action %s passed to delete!", action)
self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action)
+ if notify is not None:
+ notify.signal()
return
stack_status = self.COMPLETE
reason = 'Stack %s completed successfully' % action
self.state_set(action, self.IN_PROGRESS, 'Stack %s started' %
action)
+ if notify is not None:
+ notify.signal()
backup_stack = self._backup_stack(False)
if backup_stack:
@@ -1913,7 +1948,7 @@ class Stack(collections.Mapping):
@profiler.trace('Stack.suspend', hide_args=False)
@reset_state_on_error
- def suspend(self):
+ def suspend(self, notify=None):
"""Suspend the stack.
Invokes handle_suspend for all stack resources.
@@ -1924,6 +1959,7 @@ class Stack(collections.Mapping):
other than move to SUSPEND_COMPLETE, so the resources must implement
handle_suspend for this to have any effect.
"""
+ LOG.debug("Suspending stack %s", self)
# No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE):
LOG.info('%s is already suspended', self)
@@ -1933,12 +1969,13 @@ class Stack(collections.Mapping):
sus_task = scheduler.TaskRunner(
self.stack_task,
action=self.SUSPEND,
- reverse=True)
+ reverse=True,
+ notify=notify)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.resume', hide_args=False)
@reset_state_on_error
- def resume(self):
+ def resume(self, notify=None):
"""Resume the stack.
Invokes handle_resume for all stack resources.
@@ -1949,6 +1986,7 @@ class Stack(collections.Mapping):
other than move to RESUME_COMPLETE, so the resources must implement
handle_resume for this to have any effect.
"""
+ LOG.debug("Resuming stack %s", self)
# No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE):
LOG.info('%s is already resumed', self)
@@ -1958,7 +1996,8 @@ class Stack(collections.Mapping):
sus_task = scheduler.TaskRunner(
self.stack_task,
action=self.RESUME,
- reverse=False)
+ reverse=False,
+ notify=notify)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.snapshot', hide_args=False)
@@ -1980,21 +2019,28 @@ class Stack(collections.Mapping):
self.delete_snapshot(snapshot)
snapshot_object.Snapshot.delete(self.context, snapshot.id)
+ @staticmethod
+ def _template_from_snapshot_data(snapshot_data):
+ env = environment.Environment(snapshot_data['environment'])
+ files = snapshot_data['files']
+ return tmpl.Template(snapshot_data['template'], env=env, files=files)
+
@profiler.trace('Stack.delete_snapshot', hide_args=False)
def delete_snapshot(self, snapshot):
"""Remove a snapshot from the backends."""
- for name, rsrc in six.iteritems(self.resources):
- snapshot_data = snapshot.data
- if snapshot_data:
+ snapshot_data = snapshot.data
+ if snapshot_data:
+ template = self._template_from_snapshot_data(snapshot_data)
+ ss_defn = self.defn.clone_with_new_template(template,
+ self.identifier())
+ resources = self._resources_for_defn(ss_defn)
+ for name, rsrc in six.iteritems(resources):
data = snapshot.data['resources'].get(name)
if data:
scheduler.TaskRunner(rsrc.delete_snapshot, data)()
def restore_data(self, snapshot):
- env = environment.Environment(snapshot.data['environment'])
- files = snapshot.data['files']
- template = tmpl.Template(snapshot.data['template'],
- env=env, files=files)
+ template = self._template_from_snapshot_data(snapshot.data)
newstack = self.__class__(self.context, self.name, template,
timeout_mins=self.timeout_mins,
disable_rollback=self.disable_rollback)
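[Editor's note] delete_snapshot now rebuilds its resource objects from the template stored with the snapshot (via _template_from_snapshot_data and clone_with_new_template) instead of iterating self.resources from the current definition, so per-resource snapshot data is still cleaned up for resources that were removed from the stack after the snapshot was taken. A toy sketch of the idea, with the snapshot payload reduced to plain dicts:

    # Simplified snapshot payload; the real data also carries 'environment'
    # and 'files' entries used to rebuild the Template object.
    snapshot_data = {
        'template': {'resources': {'server': {}, 'old_volume': {}}},
        'resources': {'server': {'backup_id': 's-1'},
                      'old_volume': {'backup_id': 'v-9'}},
    }

    # Iterate the snapshot's own resource names, not the live stack's,
    # so 'old_volume' is handled even if it no longer exists in the stack.
    for name in snapshot_data['template']['resources']:
        data = snapshot_data['resources'].get(name)
        if data:
            print('cleaning up snapshot data for %s: %s' % (name, data))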
@@ -2013,16 +2059,17 @@ class Stack(collections.Mapping):
return newstack, template
@reset_state_on_error
- def restore(self, snapshot):
+ def restore(self, snapshot, notify=None):
"""Restore the given snapshot.
Invokes handle_restore on all resources.
"""
+ LOG.debug("Restoring stack %s", self)
self.updated_time = oslo_timeutils.utcnow()
newstack = self.restore_data(snapshot)[0]
updater = scheduler.TaskRunner(self.update_task, newstack,
- action=self.RESTORE)
+ action=self.RESTORE, notify=notify)
updater()
def get_availability_zones(self):
diff --git a/heat/engine/template_files.py b/heat/engine/template_files.py
index 844a5fbe4..0e7f6e77e 100644
--- a/heat/engine/template_files.py
+++ b/heat/engine/template_files.py
@@ -16,6 +16,7 @@ import six
import weakref
from heat.common import context
+from heat.common import exception
from heat.common.i18n import _
from heat.db.sqlalchemy import api as db_api
from heat.objects import raw_template_files
@@ -134,3 +135,21 @@ class TemplateFiles(collections.Mapping):
new_files = files
self.files_id = None # not persisted yet
self.files = ReadOnlyDict(new_files)
+
+
+def get_files_from_container(cnxt, files_container, files=None):
+
+ if files is None:
+ files = {}
+ else:
+ files = files.copy()
+
+ swift_plugin = cnxt.clients.client_plugin('swift')
+
+ if not swift_plugin:
+ raise exception.ClientNotAvailable(client_name='swift')
+
+ new_files = swift_plugin.get_files_from_container(files_container,
+ list(files.keys()))
+ new_files.update(files)
+ return new_files
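[Editor's note] get_files_from_container augments the request-supplied files with the objects found in the named Swift container; because of the final new_files.update(files), a file supplied explicitly in the request always wins over a same-named object in the container, and the caller's dict is never mutated. A minimal sketch of that precedence, with a stub standing in for the Swift client plugin:

    class FakeSwiftPlugin(object):
        # Stand-in for the real plugin: pretend the container holds two objects.
        def get_files_from_container(self, container, existing_keys):
            return {'env/a.yaml': 'from swift', 'env/b.yaml': 'from swift'}

    def merge_container_files(plugin, files_container, files=None):
        files = {} if files is None else files.copy()
        new_files = plugin.get_files_from_container(files_container,
                                                    list(files.keys()))
        new_files.update(files)      # request-supplied files take precedence
        return new_files

    merged = merge_container_files(FakeSwiftPlugin(), 'test_container',
                                    {'env/a.yaml': 'inline'})
    assert merged == {'env/a.yaml': 'inline', 'env/b.yaml': 'from swift'}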
diff --git a/heat/engine/worker.py b/heat/engine/worker.py
index 2e38e7acb..274dd436f 100644
--- a/heat/engine/worker.py
+++ b/heat/engine/worker.py
@@ -125,11 +125,11 @@ class WorkerService(object):
_stop_traversal(child)
def stop_all_workers(self, stack):
- # stop the traversal
- if stack.status == stack.IN_PROGRESS:
- self.stop_traversal(stack)
+ """Cancel all existing worker threads for the stack.
- # cancel existing workers
+ Threads will stop running at their next yield point, whether or not the
+ resource operations are complete.
+ """
cancelled = _cancel_workers(stack, self.thread_group_mgr,
self.engine_id, self._rpc_client)
if not cancelled:
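[Editor's note] stop_all_workers no longer stops the traversal itself (the service-layer callers above decide when that is appropriate); it only cancels the worker threads, which exit cooperatively the next time they yield. A rough analogy with a plain generator, not the eventlet machinery Heat actually uses:

    def resource_operation():
        for step in ('create', 'configure', 'verify'):
            print('running step: %s' % step)
            yield                 # cancellation can only take effect here

    task = resource_operation()
    next(task)                    # runs 'create', then parks at the yield
    task.close()                  # cancel: 'configure' and 'verify' never run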
diff --git a/heat/rpc/client.py b/heat/rpc/client.py
index 5e833f93e..b2f4a4599 100644
--- a/heat/rpc/client.py
+++ b/heat/rpc/client.py
@@ -61,6 +61,7 @@ class EngineClient(object):
and list_software_configs
1.34 - Add migrate_convergence_1 call
1.35 - Add with_condition to list_template_functions
+ 1.36 - Add files_container to create/update/preview/validate
"""
BASE_RPC_API_VERSION = '1.0'
@@ -226,7 +227,7 @@ class EngineClient(object):
version='1.20')
def preview_stack(self, ctxt, stack_name, template, params, files,
- args, environment_files=None):
+ args, environment_files=None, files_container=None):
"""Simulates a new stack using the provided template.
Note that at this stage the template has already been fetched from the
@@ -241,17 +242,19 @@ class EngineClient(object):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
"""
return self.call(ctxt,
self.make_msg('preview_stack', stack_name=stack_name,
template=template,
params=params, files=files,
environment_files=environment_files,
+ files_container=files_container,
args=args),
- version='1.23')
+ version='1.36')
def create_stack(self, ctxt, stack_name, template, params, files,
- args, environment_files=None):
+ args, environment_files=None, files_container=None):
"""Creates a new stack using the template provided.
Note that at this stage the template has already been fetched from the
@@ -266,12 +269,14 @@ class EngineClient(object):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
"""
return self._create_stack(ctxt, stack_name, template, params, files,
- args, environment_files=environment_files)
+ args, environment_files=environment_files,
+ files_container=files_container)
def _create_stack(self, ctxt, stack_name, template, params, files,
- args, environment_files=None,
+ args, environment_files=None, files_container=None,
owner_id=None, nested_depth=0, user_creds_id=None,
stack_user_project_id=None, parent_resource_name=None,
template_id=None):
@@ -292,16 +297,18 @@ class EngineClient(object):
template=template,
params=params, files=files,
environment_files=environment_files,
+ files_container=files_container,
args=args, owner_id=owner_id,
nested_depth=nested_depth,
user_creds_id=user_creds_id,
stack_user_project_id=stack_user_project_id,
parent_resource_name=parent_resource_name,
template_id=template_id),
- version='1.29')
+ version='1.36')
def update_stack(self, ctxt, stack_identity, template, params,
- files, args, environment_files=None):
+ files, args, environment_files=None,
+ files_container=None):
"""Updates an existing stack based on the provided template and params.
Note that at this stage the template has already been fetched from the
@@ -316,14 +323,16 @@ class EngineClient(object):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
"""
return self._update_stack(ctxt, stack_identity, template, params,
files, args,
- environment_files=environment_files)
+ environment_files=environment_files,
+ files_container=files_container)
def _update_stack(self, ctxt, stack_identity, template, params,
files, args, environment_files=None,
- template_id=None):
+ files_container=None, template_id=None):
"""Internal interface for engine-to-engine communication via RPC.
Allows an additional option which should not be exposed to users via
@@ -338,12 +347,14 @@ class EngineClient(object):
params=params,
files=files,
environment_files=environment_files,
+ files_container=files_container,
args=args,
template_id=template_id),
- version='1.29')
+ version='1.36')
def preview_update_stack(self, ctxt, stack_identity, template, params,
- files, args, environment_files=None):
+ files, args, environment_files=None,
+ files_container=None):
"""Returns the resources that would be changed in an update.
Based on the provided template and parameters.
@@ -359,6 +370,7 @@ class EngineClient(object):
:param environment_files: optional ordered list of environment file
names included in the files dict
:type environment_files: list or None
+ :param files_container: optional swift container name
"""
return self.call(ctxt,
self.make_msg('preview_update_stack',
@@ -367,13 +379,14 @@ class EngineClient(object):
params=params,
files=files,
environment_files=environment_files,
+ files_container=files_container,
args=args,
),
- version='1.23')
+ version='1.36')
def validate_template(self, ctxt, template, params=None, files=None,
- environment_files=None, show_nested=False,
- ignorable_errors=None):
+ environment_files=None, files_container=None,
+ show_nested=False, ignorable_errors=None):
"""Uses the stack parser to check the validity of a template.
:param ctxt: RPC context.
@@ -382,6 +395,7 @@ class EngineClient(object):
:param files: files referenced from the environment/template.
:param environment_files: ordered list of environment file names
included in the files dict
+ :param files_container: optional swift container name
:param show_nested: if True nested templates will be validated
:param ignorable_errors: List of error_code to be ignored as part of
validation
@@ -393,8 +407,9 @@ class EngineClient(object):
files=files,
show_nested=show_nested,
environment_files=environment_files,
+ files_container=files_container,
ignorable_errors=ignorable_errors),
- version='1.24')
+ version='1.36')
def authenticated_to_backend(self, ctxt):
"""Validate the credentials in the RPC context.
diff --git a/heat/tests/api/cfn/test_api_cfn_v1.py b/heat/tests/api/cfn/test_api_cfn_v1.py
index abd2fbddf..7b444f4dd 100644
--- a/heat/tests/api/cfn/test_api_cfn_v1.py
+++ b/heat/tests/api/cfn/test_api_cfn_v1.py
@@ -546,6 +546,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
@@ -553,7 +554,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_rollback(self):
@@ -592,6 +593,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
@@ -599,7 +601,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_onfailure_true(self):
@@ -638,6 +640,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
@@ -645,7 +648,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_onfailure_false_delete(self):
@@ -674,6 +677,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
@@ -681,7 +685,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
expected = {
@@ -730,6 +734,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'owner_id': None,
'nested_depth': 0,
@@ -737,7 +742,7 @@ class CfnStackControllerTest(common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_onfailure_err(self):
@@ -913,9 +918,10 @@ class CfnStackControllerTest(common.HeatTestCase):
'params': engine_parms,
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': engine_args,
'template_id': None}),
- version='1.29'
+ version='1.36'
)], self.m_call.call_args_list)
def test_cancel_update(self):
@@ -1091,9 +1097,10 @@ class CfnStackControllerTest(common.HeatTestCase):
dummy_req.context,
('validate_template', {'template': json_template, 'params': None,
'files': None, 'environment_files': None,
+ 'files_container': None,
'show_nested': False,
'ignorable_errors': None}),
- version='1.24'
+ version='1.36'
)
def test_delete(self):
diff --git a/heat/tests/api/openstack_v1/test_stacks.py b/heat/tests/api/openstack_v1/test_stacks.py
index 2fa3ebc60..b0c51799a 100644
--- a/heat/tests/api/openstack_v1/test_stacks.py
+++ b/heat/tests/api/openstack_v1/test_stacks.py
@@ -756,6 +756,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': ['foo.yaml'],
+ 'files_container': None,
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
@@ -763,7 +764,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_with_tags(self, mock_enforce):
@@ -803,6 +804,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
'owner_id': None,
'nested_depth': 0,
@@ -810,7 +812,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_adopt(self, mock_enforce):
@@ -868,6 +870,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)},
'owner_id': None,
@@ -876,7 +879,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_adopt_timeout_not_int(self, mock_enforce):
@@ -957,6 +960,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {'my.yaml': 'This is the file contents.'},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
@@ -964,7 +968,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_err_rpcerr(self, mock_enforce):
@@ -1025,6 +1029,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
@@ -1032,7 +1037,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
self.assertEqual(3, mock_call.call_count)
@@ -1072,6 +1077,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
@@ -1079,7 +1085,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_timeout_not_int(self, mock_enforce):
@@ -1158,6 +1164,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
@@ -1165,7 +1172,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent_resource_name': None,
'stack_user_project_id': None,
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_create_err_stack_bad_reqest(self, mock_enforce):
@@ -1235,8 +1242,9 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}}),
- version='1.23'
+ version='1.36'
)
self.assertEqual({'stack': 'formatted_stack_preview'}, response)
@@ -1280,8 +1288,9 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30}}),
- version='1.23'
+ version='1.36'
)
def test_preview_update_stack_patch(self, mock_enforce):
@@ -1321,9 +1330,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30}}),
- version='1.23'
+ version='1.36'
)
@mock.patch.object(rpc_client.EngineClient, 'call')
@@ -1369,9 +1379,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
u'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_lookup(self, mock_enforce):
@@ -1829,9 +1840,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_tags(self, mock_enforce):
@@ -1870,9 +1882,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_bad_name(self, mock_enforce):
@@ -1914,9 +1927,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
u'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_timeout_not_int(self, mock_enforce):
@@ -1999,10 +2013,11 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_existing_parameters(self, mock_enforce):
@@ -2039,10 +2054,11 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_existing_parameters_with_tags(self, mock_enforce):
@@ -2080,11 +2096,12 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30,
'tags': ['tag1', 'tag2']},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_patched_existing_parameters(self, mock_enforce):
@@ -2122,10 +2139,11 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_patch_timeout_not_int(self, mock_enforce):
@@ -2189,11 +2207,12 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_update_with_patched_and_default_parameters(
@@ -2234,11 +2253,12 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'args': {rpc_api.PARAM_EXISTING: True,
'clear_parameters': clear_params,
'timeout_mins': 30},
'template_id': None}),
- version='1.29'
+ version='1.36'
)
def test_delete(self, mock_enforce):
@@ -2398,9 +2418,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'show_nested': False,
'ignorable_errors': None}),
- version='1.24'
+ version='1.36'
)
def test_validate_template_error(self, mock_enforce):
@@ -2428,9 +2449,10 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'resource_registry': {}},
'files': {},
'environment_files': None,
+ 'files_container': None,
'show_nested': False,
'ignorable_errors': None}),
- version='1.24'
+ version='1.36'
)
def test_validate_err_denied_policy(self, mock_enforce):
diff --git a/heat/tests/clients/test_blazar_client.py b/heat/tests/clients/test_blazar_client.py
new file mode 100644
index 000000000..f9410f9fe
--- /dev/null
+++ b/heat/tests/clients/test_blazar_client.py
@@ -0,0 +1,42 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.tests import common
+from heat.tests import utils
+import mock
+
+
+class BlazarClientPluginTest(common.HeatTestCase):
+
+ def setUp(self):
+ super(BlazarClientPluginTest, self).setUp()
+ self.blazar_client = mock.MagicMock()
+ context = utils.dummy_context()
+ self.blazar_client_plugin = context.clients.client_plugin('blazar')
+
+ def _stub_client(self):
+ self.blazar_client_plugin.client = lambda: self.blazar_client
+
+ def test_create(self):
+ client = self.blazar_client_plugin.client()
+ self.assertEqual(None, client.blazar_url)
+
+ def test_has_host_pass(self):
+ self._stub_client()
+ self.blazar_client.host.list.return_value = ['hosta']
+ self.assertEqual(True, self.blazar_client_plugin.has_host())
+
+ def test_has_host_fail(self):
+ self._stub_client()
+ self.blazar_client.host.list.return_value = []
+ self.assertEqual(False, self.blazar_client_plugin.has_host())
diff --git a/heat/tests/clients/test_zun_client.py b/heat/tests/clients/test_zun_client.py
index 2bb636554..2573a095a 100644
--- a/heat/tests/clients/test_zun_client.py
+++ b/heat/tests/clients/test_zun_client.py
@@ -10,12 +10,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
+from zunclient import exceptions as zc_exc
+
from heat.tests import common
from heat.tests import utils
class ZunClientPluginTest(common.HeatTestCase):
+ def setUp(self):
+ super(ZunClientPluginTest, self).setUp()
+ self.client = mock.Mock()
+ context = utils.dummy_context()
+ self.plugin = context.clients.client_plugin('zun')
+ self.plugin.client = lambda **kw: self.client
+ self.resource_id = '123456'
+
def test_create(self):
context = utils.dummy_context()
plugin = context.clients.client_plugin('zun')
@@ -24,3 +36,21 @@ class ZunClientPluginTest(common.HeatTestCase):
client.containers.api.session.auth.endpoint)
self.assertEqual('1.12',
client.api_version.get_string())
+
+ def test_container_update(self):
+ prop_diff = {'cpu': 10, 'memory': 10, 'name': 'fake-container'}
+ self.plugin.update_container(self.resource_id, **prop_diff)
+ self.client.containers.update.assert_called_once_with(
+ self.resource_id, cpu=10, memory=10, name='fake-container')
+
+ def test_container_update_not_acceptable(self):
+ self.client.containers.update.side_effect = [
+ zc_exc.NotAcceptable(), None]
+ prop_diff = {'cpu': 10, 'memory': 10, 'name': 'fake-container'}
+ self.plugin.update_container(self.resource_id, **prop_diff)
+ self.client.containers.update.assert_has_calls([
+ mock.call(self.resource_id, cpu=10, memory=10,
+ name='fake-container'),
+ mock.call(self.resource_id, cpu=10, memory=10)])
+ self.client.containers.rename.assert_called_once_with(
+ self.resource_id, name='fake-container')
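[Editor's note] The new test encodes the Zun client plugin's fallback: if a container update that includes a name is rejected with NotAcceptable (as older Zun API microversions do), the plugin retries the update without the name and renames the container in a separate call. The plugin internals are inferred here from the test, so the sketch below is illustrative rather than Heat's actual implementation:

    class NotAcceptable(Exception):
        pass

    def update_container(client, container_id, **props):
        try:
            client.update(container_id, **props)
        except NotAcceptable:
            name = props.pop('name', None)
            client.update(container_id, **props)     # retry without the name
            if name is not None:
                client.rename(container_id, name=name)

    class FakeContainers(object):
        def __init__(self):
            self.calls = []

        def update(self, cid, **kwargs):
            self.calls.append(('update', kwargs))
            if 'name' in kwargs:
                raise NotAcceptable()

        def rename(self, cid, **kwargs):
            self.calls.append(('rename', kwargs))

    client = FakeContainers()
    update_container(client, '123456', cpu=10, memory=10, name='fake-container')
    assert [c[0] for c in client.calls] == ['update', 'update', 'rename']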
diff --git a/heat/tests/constraints/test_common_constraints.py b/heat/tests/constraints/test_common_constraints.py
index eadd317b2..665c0b903 100644
--- a/heat/tests/constraints/test_common_constraints.py
+++ b/heat/tests/constraints/test_common_constraints.py
@@ -11,6 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
import six
from heat.engine.constraint import common_constraints as cc
@@ -35,6 +36,11 @@ class TestIPConstraint(common.HeatTestCase):
def test_invalidate_ipv4_format(self):
invalidate_format = [
+ None,
+ 123,
+ '1.1',
+ '1.1.',
+ '1.1.1',
'1.1.1.',
'1.1.1.256',
'invalidate format',
@@ -100,7 +106,6 @@ class TestCIDRConstraint(common.HeatTestCase):
validate_format = [
'10.0.0.0/24',
'6000::/64',
- '8.8.8.8'
]
for cidr in validate_format:
self.assertTrue(self.constraint.validate(cidr, None))
@@ -111,11 +116,30 @@ class TestCIDRConstraint(common.HeatTestCase):
'Invalid cidr',
'300.0.0.0/24',
'10.0.0.0/33',
- '8.8.8.0/ 24'
+ '10.0.0/24',
+ '10.0/24',
+ '10.0.a.10/24',
+ '8.8.8.0/ 24',
+ '8.8.8.8'
]
for cidr in invalidate_format:
self.assertFalse(self.constraint.validate(cidr, None))
+ @mock.patch('neutron_lib.api.validators.validate_subnet')
+ def test_validate(self, mock_validate_subnet):
+ test_formats = [
+ '10.0.0/24',
+ '10.0/24',
+ ]
+ self.assertFalse(self.constraint.validate('10.0.0.0/33', None))
+
+ for cidr in test_formats:
+ self.assertFalse(self.constraint.validate(cidr, None))
+
+ mock_validate_subnet.assert_any_call('10.0.0/24')
+ mock_validate_subnet.assert_called_with('10.0/24')
+ self.assertEqual(mock_validate_subnet.call_count, 2)
+
class TestISO8601Constraint(common.HeatTestCase):
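[Editor's note] The tightened expectations above reflect two behaviour changes: a bare address such as 8.8.8.8 no longer validates as a CIDR, and incompletely specified networks such as 10.0.0/24 are rejected because the constraint additionally runs neutron_lib's subnet validator (mocked in test_validate). Heat delegates the real checks to netaddr and neutron_lib; the sketch below only mirrors the accept/reject outcomes for the listed values using the standard ipaddress module:

    import ipaddress

    def looks_like_cidr(value):
        # Require an explicit prefix and a fully specified network address.
        if not isinstance(value, str) or '/' not in value:
            return False
        try:
            ipaddress.ip_network(value, strict=False)
            return True
        except ValueError:
            return False

    assert looks_like_cidr('10.0.0.0/24')
    assert looks_like_cidr('6000::/64')
    assert not looks_like_cidr('8.8.8.8')        # bare IP is not a CIDR
    assert not looks_like_cidr('10.0.0/24')      # incomplete address
    assert not looks_like_cidr('10.0.0.0/33')    # prefix out of range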
diff --git a/heat/tests/convergence/framework/worker_wrapper.py b/heat/tests/convergence/framework/worker_wrapper.py
index 042bc9e07..7ca39ada1 100644
--- a/heat/tests/convergence/framework/worker_wrapper.py
+++ b/heat/tests/convergence/framework/worker_wrapper.py
@@ -37,5 +37,8 @@ class Worker(message_processor.MessageProcessor):
adopt_stack_data,
converge)
+ def stop_traversal(self, current_stack):
+ pass
+
def stop_all_workers(self, current_stack):
pass
diff --git a/heat/tests/engine/service/test_service_engine.py b/heat/tests/engine/service/test_service_engine.py
index 494a2dbe9..421c2e359 100644
--- a/heat/tests/engine/service/test_service_engine.py
+++ b/heat/tests/engine/service/test_service_engine.py
@@ -39,7 +39,7 @@ class ServiceEngineTest(common.HeatTestCase):
def test_make_sure_rpc_version(self):
self.assertEqual(
- '1.35',
+ '1.36',
service.EngineService.RPC_API_VERSION,
('RPC version is changed, please update this test to new version '
'and make sure additional test cases are added for RPC APIs '
@@ -227,6 +227,8 @@ class ServiceEngineTest(common.HeatTestCase):
return_value=mock.Mock())
@mock.patch('heat.engine.service.EngineListener',
return_value=mock.Mock())
+ @mock.patch('heat.engine.worker.WorkerService',
+ return_value=mock.Mock())
@mock.patch('oslo_service.threadgroup.ThreadGroup',
return_value=mock.Mock())
@mock.patch.object(service.EngineService, '_configure_db_conn_pool_size')
@@ -234,16 +236,17 @@ class ServiceEngineTest(common.HeatTestCase):
self,
configure_db_conn_pool_size,
thread_group_class,
+ worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
rpc_client_class,
target_class,
rpc_server_method):
- cfg.CONF.set_default('convergence_engine', False)
+ cfg.CONF.set_override('convergence_engine', False)
self._test_engine_service_start(
thread_group_class,
- None,
+ worker_service_class,
engine_listener_class,
thread_group_manager_class,
sample_uuid_method,
@@ -280,7 +283,7 @@ class ServiceEngineTest(common.HeatTestCase):
rpc_client_class,
target_class,
rpc_server_method):
- cfg.CONF.set_default('convergence_engine', True)
+ cfg.CONF.set_override('convergence_engine', True)
self._test_engine_service_start(
thread_group_class,
worker_service_class,
diff --git a/heat/tests/engine/service/test_stack_action.py b/heat/tests/engine/service/test_stack_action.py
index 1f01f3ac4..ccec6bcac 100644
--- a/heat/tests/engine/service/test_stack_action.py
+++ b/heat/tests/engine/service/test_stack_action.py
@@ -44,12 +44,14 @@ class StackServiceActionsTest(common.HeatTestCase):
thread = mock.MagicMock()
mock_link = self.patchobject(thread, 'link')
mock_start.return_value = thread
+ self.patchobject(service, 'NotifyEvent')
result = self.man.stack_suspend(self.ctx, stk.identifier())
self.assertIsNone(result)
mock_load.assert_called_once_with(self.ctx, stack=s)
mock_link.assert_called_once_with(mock.ANY)
- mock_start.assert_called_once_with(stk.id, mock.ANY, stk)
+ mock_start.assert_called_once_with(stk.id, stk.suspend,
+ notify=mock.ANY)
stk.delete()
@@ -64,13 +66,14 @@ class StackServiceActionsTest(common.HeatTestCase):
thread = mock.MagicMock()
mock_link = self.patchobject(thread, 'link')
mock_start.return_value = thread
+ self.patchobject(service, 'NotifyEvent')
result = self.man.stack_resume(self.ctx, stk.identifier())
self.assertIsNone(result)
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_link.assert_called_once_with(mock.ANY)
- mock_start.assert_called_once_with(stk.id, mock.ANY, stk)
+ mock_start.assert_called_once_with(stk.id, stk.resume, notify=mock.ANY)
stk.delete()
@@ -108,6 +111,7 @@ class StackServiceActionsTest(common.HeatTestCase):
stk = utils.parse_stack(t, stack_name=stack_name)
stk.check = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_load.return_value = stk
mock_start.side_effect = self._mock_thread_start
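[Editor's note] These tests now stub service.NotifyEvent because the suspend, resume and check handlers block on NotifyEvent.wait(), and with the thread-group manager's start methods mocked out nothing ever signals the event; stubbing the class makes wait() a no-op so the handler returns immediately. The general pattern, sketched against a toy module-level class rather than the Heat fixtures:

    try:
        from unittest import mock
    except ImportError:
        import mock

    class NotifyEvent(object):    # toy stand-in for heat.engine.service.NotifyEvent
        def wait(self):
            raise RuntimeError('would block forever in a unit test')

    def stack_check():
        event = NotifyEvent()
        # The real handler hands the event to a worker thread, then waits on it.
        event.wait()
        return 'checked'

    with mock.patch('__main__.NotifyEvent'):
        print(stack_check())      # returns immediately; the mock's wait() is a no-op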
diff --git a/heat/tests/engine/service/test_stack_create.py b/heat/tests/engine/service/test_stack_create.py
index 88c5971bf..29b99355e 100644
--- a/heat/tests/engine/service/test_stack_create.py
+++ b/heat/tests/engine/service/test_stack_create.py
@@ -10,16 +10,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_service import threadgroup
import six
+from swiftclient import exceptions
from heat.common import environment_util as env_util
from heat.common import exception
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
+from heat.engine.clients.os import swift
from heat.engine import environment
from heat.engine import properties
from heat.engine.resources.aws.ec2 import instance as instances
@@ -43,7 +46,8 @@ class StackCreateTest(common.HeatTestCase):
@mock.patch.object(threadgroup, 'ThreadGroup')
@mock.patch.object(stack.Stack, 'validate')
def _test_stack_create(self, stack_name, mock_validate, mock_tg,
- environment_files=None):
+ environment_files=None, files_container=None,
+ error=False):
mock_tg.return_value = tools.DummyThreadGroup()
params = {'foo': 'bar'}
@@ -51,29 +55,44 @@ class StackCreateTest(common.HeatTestCase):
stk = tools.get_stack(stack_name, self.ctx)
+ files = None
+ if files_container:
+ files = {'/env/test.yaml': "{'resource_registry': {}}"}
+
mock_tmpl = self.patchobject(templatem, 'Template', return_value=stk.t)
mock_env = self.patchobject(environment, 'Environment',
return_value=stk.env)
mock_stack = self.patchobject(stack, 'Stack', return_value=stk)
mock_merge = self.patchobject(env_util, 'merge_environments')
- result = self.man.create_stack(self.ctx, stack_name,
- template, params, None, {},
- environment_files=environment_files)
- self.assertEqual(stk.identifier(), result)
- self.assertIsInstance(result, dict)
- self.assertTrue(result['stack_id'])
-
- mock_tmpl.assert_called_once_with(template, files=None)
- mock_env.assert_called_once_with(params)
- mock_stack.assert_called_once_with(
- self.ctx, stack_name, stk.t, owner_id=None, nested_depth=0,
- user_creds_id=None, stack_user_project_id=None,
- convergence=cfg.CONF.convergence_engine, parent_resource=None)
-
- if environment_files:
- mock_merge.assert_called_once_with(environment_files, None,
- params, mock.ANY)
- mock_validate.assert_called_once_with()
+ if not error:
+ result = self.man.create_stack(self.ctx, stack_name,
+ template, params, None, {},
+ environment_files=environment_files,
+ files_container=files_container)
+ self.assertEqual(stk.identifier(), result)
+ self.assertIsInstance(result, dict)
+ self.assertTrue(result['stack_id'])
+ mock_tmpl.assert_called_once_with(template, files=files)
+ mock_env.assert_called_once_with(params)
+ mock_stack.assert_called_once_with(
+ self.ctx, stack_name, stk.t, owner_id=None, nested_depth=0,
+ user_creds_id=None, stack_user_project_id=None,
+ convergence=cfg.CONF.convergence_engine, parent_resource=None)
+ if environment_files:
+ mock_merge.assert_called_once_with(environment_files, files,
+ params, mock.ANY)
+ mock_validate.assert_called_once_with()
+ else:
+ ex = self.assertRaises(dispatcher.ExpectedException,
+ self.man.create_stack,
+ self.ctx, stack_name,
+ template, params, None, {},
+ environment_files=environment_files,
+ files_container=files_container)
+ self.assertEqual(exception.NotFound, ex.exc_info[0])
+ self.assertIn('Could not fetch files from container '
+ 'test_container, reason: error.',
+ six.text_type(ex.exc_info[1]))
def test_stack_create(self):
stack_name = 'service_create_test_stack'
@@ -85,6 +104,41 @@ class StackCreateTest(common.HeatTestCase):
self._test_stack_create(stack_name,
environment_files=environment_files)
+ def test_stack_create_with_files_container(self):
+ stack_name = 'env_files_test_stack'
+ environment_files = ['env_1', 'env_2']
+ files_container = 'test_container'
+ fake_get_object = (None, "{'resource_registry': {}}")
+ fake_get_container = ({'x-container-bytes-used': 100},
+ [{'name': '/env/test.yaml'}])
+ mock_client = mock.Mock()
+ mock_client.get_object.return_value = fake_get_object
+ mock_client.get_container.return_value = fake_get_container
+ self.patchobject(swift.SwiftClientPlugin, '_create',
+ return_value=mock_client)
+ self._test_stack_create(stack_name,
+ environment_files=environment_files,
+ files_container=files_container)
+ mock_client.get_container.assert_called_with(files_container)
+ mock_client.get_object.assert_called_with(files_container,
+ '/env/test.yaml')
+
+ def test_stack_create_with_container_notfound_swift(self):
+ stack_name = 'env_files_test_stack'
+ environment_files = ['env_1', 'env_2']
+ files_container = 'test_container'
+ mock_client = mock.Mock()
+ mock_client.get_container.side_effect = exceptions.ClientException(
+ 'error')
+ self.patchobject(swift.SwiftClientPlugin, '_create',
+ return_value=mock_client)
+ self._test_stack_create(stack_name,
+ environment_files=environment_files,
+ files_container=files_container,
+ error=True)
+ mock_client.get_container.assert_called_with(files_container)
+ mock_client.get_object.assert_not_called()
+
def test_stack_create_equals_max_per_tenant(self):
cfg.CONF.set_override('max_stacks_per_tenant', 1)
stack_name = 'service_create_test_stack_equals_max'
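[Editor's note] The Swift fixtures in these tests mirror python-swiftclient's return shapes: get_container returns a (headers, listing) tuple whose listing is a list of dicts with a name key, and get_object returns a (headers, body) tuple. The tests patch SwiftClientPlugin._create to hand back a mock wired with exactly those shapes; condensed:

    from unittest import mock   # the Heat tests use the external 'mock' package

    mock_swift = mock.Mock()
    mock_swift.get_container.return_value = (
        {'x-container-bytes-used': 100},        # container headers
        [{'name': '/env/test.yaml'}])           # object listing
    mock_swift.get_object.return_value = (
        None, "{'resource_registry': {}}")      # (object headers, object body)

    headers, listing = mock_swift.get_container('test_container')
    _, body = mock_swift.get_object('test_container', listing[0]['name'])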
diff --git a/heat/tests/engine/service/test_stack_events.py b/heat/tests/engine/service/test_stack_events.py
index 1c3bfa1e3..fa0e8e1d1 100644
--- a/heat/tests/engine/service/test_stack_events.py
+++ b/heat/tests/engine/service/test_stack_events.py
@@ -12,6 +12,8 @@
# under the License.
import mock
+from oslo_config import cfg
+from oslo_messaging import conffixture
from heat.engine import resource as res
from heat.engine.resources.aws.ec2 import instance as instances
@@ -94,6 +96,7 @@ class StackEventTest(common.HeatTestCase):
@tools.stack_context('service_event_list_deleted_resource')
@mock.patch.object(instances.Instance, 'handle_delete')
def test_event_list_deleted_resource(self, mock_delete):
+ self.useFixture(conffixture.ConfFixture(cfg.CONF))
mock_delete.return_value = None
res._register_class('GenericResourceType',
@@ -103,7 +106,7 @@ class StackEventTest(common.HeatTestCase):
thread.link = mock.Mock(return_value=None)
def run(stack_id, func, *args, **kwargs):
- func(*args)
+ func(*args, **kwargs)
return thread
self.eng.thread_group_mgr.start = run
diff --git a/heat/tests/engine/service/test_stack_update.py b/heat/tests/engine/service/test_stack_update.py
index 8848da077..ff4e7943f 100644
--- a/heat/tests/engine/service/test_stack_update.py
+++ b/heat/tests/engine/service/test_stack_update.py
@@ -15,6 +15,7 @@ import uuid
import eventlet.queue
import mock
from oslo_config import cfg
+from oslo_messaging import conffixture
from oslo_messaging.rpc import dispatcher
import six
@@ -26,6 +27,7 @@ from heat.common import template_format
from heat.db.sqlalchemy import api as db_api
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
+from heat.engine.clients.os import swift
from heat.engine import environment
from heat.engine import resource
from heat.engine import service
@@ -43,6 +45,7 @@ class ServiceStackUpdateTest(common.HeatTestCase):
def setUp(self):
super(ServiceStackUpdateTest, self).setUp()
+ self.useFixture(conffixture.ConfFixture(cfg.CONF))
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
self.man.thread_group_mgr = tools.DummyThreadGroupManager()
@@ -68,7 +71,8 @@ class ServiceStackUpdateTest(common.HeatTestCase):
mock_validate = self.patchobject(stk, 'validate', return_value=None)
msgq_mock = mock.Mock()
- self.patchobject(eventlet.queue, 'LightQueue', return_value=msgq_mock)
+ self.patchobject(eventlet.queue, 'LightQueue',
+ side_effect=[msgq_mock, eventlet.queue.LightQueue()])
# do update
api_args = {'timeout_mins': 60, rpc_api.PARAM_CONVERGE: True}
@@ -103,9 +107,9 @@ class ServiceStackUpdateTest(common.HeatTestCase):
mock_load.assert_called_once_with(self.ctx, stack=s)
mock_validate.assert_called_once_with()
- def test_stack_update_with_environment_files(self):
+ def _test_stack_update_with_environment_files(self, stack_name,
+ files_container=None):
# Setup
- stack_name = 'service_update_env_files_stack'
params = {}
template = '{ "Template": "data" }'
old_stack = tools.get_stack(stack_name, self.ctx)
@@ -122,21 +126,47 @@ class ServiceStackUpdateTest(common.HeatTestCase):
self.patchobject(environment, 'Environment', return_value=stk.env)
self.patchobject(stk, 'validate', return_value=None)
self.patchobject(eventlet.queue, 'LightQueue',
- return_value=mock.Mock())
+ side_effect=[mock.Mock(),
+ eventlet.queue.LightQueue()])
mock_merge = self.patchobject(env_util, 'merge_environments')
+ files = None
+ if files_container:
+ files = {'/env/test.yaml': "{'resource_registry': {}}"}
+
# Test
environment_files = ['env_1']
self.man.update_stack(self.ctx, old_stack.identifier(),
template, params, None,
{rpc_api.PARAM_CONVERGE: False},
- environment_files=environment_files)
-
+ environment_files=environment_files,
+ files_container=files_container)
# Verify
- mock_merge.assert_called_once_with(environment_files, None,
+ mock_merge.assert_called_once_with(environment_files, files,
params, mock.ANY)
+ def test_stack_update_with_environment_files(self):
+ stack_name = 'service_update_env_files_stack'
+ self._test_stack_update_with_environment_files(stack_name)
+
+ def test_stack_update_with_files_container(self):
+ stack_name = 'env_files_test_stack'
+ files_container = 'test_container'
+ fake_get_object = (None, "{'resource_registry': {}}")
+ fake_get_container = ({'x-container-bytes-used': 100},
+ [{'name': '/env/test.yaml'}])
+ mock_client = mock.Mock()
+ mock_client.get_object.return_value = fake_get_object
+ mock_client.get_container.return_value = fake_get_container
+ self.patchobject(swift.SwiftClientPlugin, '_create',
+ return_value=mock_client)
+ self._test_stack_update_with_environment_files(
+ stack_name, files_container=files_container)
+ mock_client.get_container.assert_called_with(files_container)
+ mock_client.get_object.assert_called_with(files_container,
+ '/env/test.yaml')
+
def test_stack_update_nested(self):
stack_name = 'service_update_nested_test_stack'
parent_stack = tools.get_stack(stack_name + '_parent', self.ctx)
@@ -160,7 +190,8 @@ class ServiceStackUpdateTest(common.HeatTestCase):
mock_validate = self.patchobject(stk, 'validate', return_value=None)
msgq_mock = mock.Mock()
- self.patchobject(eventlet.queue, 'LightQueue', return_value=msgq_mock)
+ self.patchobject(eventlet.queue, 'LightQueue',
+ side_effect=[msgq_mock, eventlet.queue.LightQueue()])
# do update
api_args = {'timeout_mins': 60, rpc_api.PARAM_CONVERGE: False}
@@ -219,6 +250,7 @@ class ServiceStackUpdateTest(common.HeatTestCase):
t['parameters']['newparam'] = {'type': 'number'}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
stk.update = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = stk
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stk.identifier(),
@@ -275,7 +307,8 @@ resources:
rpc_api.PARAM_CONVERGE: False}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
- stk.update = mock.Mock()
+ loaded_stack.update = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = loaded_stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stk.identifier(),
@@ -318,6 +351,7 @@ resources:
t['parameters']['newparam'] = {'type': 'number'}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
stk.update = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = stk
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stk.identifier(),
@@ -348,20 +382,20 @@ resources:
# update keep old tags
_, _, updated_stack = self.man._prepare_stack_updates(
- self.ctx, stk, t, {}, None, None, api_args, None)
+ self.ctx, stk, t, {}, None, None, None, api_args, None)
self.assertEqual(['tag1'], updated_stack.tags)
# with new tags
api_args[rpc_api.STACK_TAGS] = ['tag2']
_, _, updated_stack = self.man._prepare_stack_updates(
- self.ctx, stk, t, {}, None, None, api_args, None)
+ self.ctx, stk, t, {}, None, None, None, api_args, None)
self.assertEqual(['tag2'], updated_stack.tags)
# with no PARAM_EXISTING flag and no tags
del api_args[rpc_api.PARAM_EXISTING]
del api_args[rpc_api.STACK_TAGS]
_, _, updated_stack = self.man._prepare_stack_updates(
- self.ctx, stk, t, {}, None, None, api_args, None)
+ self.ctx, stk, t, {}, None, None, None, api_args, None)
self.assertIsNone(updated_stack.tags)
def test_stack_update_existing_registry(self):
@@ -418,6 +452,7 @@ resources:
'myother.yaml': 'myother'}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
stk.update = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = stk
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stk.identifier(),
@@ -464,6 +499,7 @@ resources:
'resource_registry': {'resources': {}}}
with mock.patch('heat.engine.stack.Stack') as mock_stack:
stk.update = mock.Mock()
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = stk
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stk.identifier(),
@@ -864,6 +900,7 @@ resources:
stack.status = stack.COMPLETE
with mock.patch('heat.engine.stack.Stack') as mock_stack:
+ self.patchobject(service, 'NotifyEvent')
mock_stack.load.return_value = stack
mock_stack.validate.return_value = None
result = self.man.update_stack(self.ctx, stack.identifier(),
diff --git a/heat/tests/engine/test_engine_worker.py b/heat/tests/engine/test_engine_worker.py
index eeae9e326..affb51186 100644
--- a/heat/tests/engine/test_engine_worker.py
+++ b/heat/tests/engine/test_engine_worker.py
@@ -209,7 +209,7 @@ class WorkerServiceTest(common.HeatTestCase):
stack.id = 'stack_id'
stack.rollback = mock.MagicMock()
_worker.stop_all_workers(stack)
- mock_st.assert_called_once_with(stack)
+ mock_st.assert_not_called()
mock_cw.assert_called_once_with(stack, mock_tgm, 'engine-001',
_worker._rpc_client)
self.assertFalse(stack.rollback.called)
diff --git a/heat/tests/engine/test_resource_type.py b/heat/tests/engine/test_resource_type.py
index 25bbc05e0..2f67d700d 100644
--- a/heat/tests/engine/test_resource_type.py
+++ b/heat/tests/engine/test_resource_type.py
@@ -44,11 +44,7 @@ class ResourceTypeTest(common.HeatTestCase):
mock_is_service_available.return_value = (True, None)
resources = self.eng.list_resource_types(self.ctx, "DEPRECATED")
self.assertEqual(set(['OS::Aodh::Alarm',
- 'OS::Magnum::Bay',
- 'OS::Magnum::BayModel',
- 'OS::Glance::Image',
- 'OS::Nova::FloatingIP',
- 'OS::Nova::FloatingIPAssociation']),
+ 'OS::Glance::Image']),
set(resources))
@mock.patch.object(res.Resource, 'is_service_available')
diff --git a/heat/tests/openstack/manila/test_share_network.py b/heat/tests/openstack/manila/test_share_network.py
index 5f1ea408f..da723b35b 100644
--- a/heat/tests/openstack/manila/test_share_network.py
+++ b/heat/tests/openstack/manila/test_share_network.py
@@ -152,11 +152,16 @@ class ManilaShareNetworkTest(common.HeatTestCase):
self.assertEqual('share_networks', net.entity)
def test_create_fail(self):
- self.client.share_networks.add_security_service.side_effect = (
- Exception())
+ self.client_plugin.is_conflict.return_value = False
+ self.client.share_networks.add_security_service.side_effect = Exception
self.assertRaises(
exception.ResourceFailure,
self._create_network, 'share_network', self.rsrc_defn, self.stack)
+ csn = self.client.share_networks
+ csn.create.assert_called_with(
+ name='1', description='2', neutron_net_id='3',
+ neutron_subnet_id='4', nova_net_id=None)
+ csn.add_security_service.assert_called_once_with('42', '6')
def test_validate_conflicting_net_subnet(self):
t = template_format.parse(stack_template)
diff --git a/heat/tests/openstack/neutron/test_neutron_port.py b/heat/tests/openstack/neutron/test_neutron_port.py
index 23619b76c..b27b4eda2 100644
--- a/heat/tests/openstack/neutron/test_neutron_port.py
+++ b/heat/tests/openstack/neutron/test_neutron_port.py
@@ -51,7 +51,7 @@ resources:
properties:
network: abcd1234
allowed_address_pairs:
- - ip_address: 10.0.3.21
+ - ip_address: 10.0.3.21/8
mac_address: 00-B0-D0-86-BB-F7
'''
@@ -200,7 +200,7 @@ class NeutronPortTest(common.HeatTestCase):
self.create_mock.assert_called_once_with({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
- 'ip_address': u'10.0.3.21',
+ 'ip_address': u'10.0.3.21/8',
'mac_address': u'00-B0-D0-86-BB-F7'
}],
'name': utils.PhysName(stack.name, 'port'),
@@ -260,7 +260,7 @@ class NeutronPortTest(common.HeatTestCase):
self.create_mock.assert_called_once_with({'port': {
'network_id': u'abcd1234',
'allowed_address_pairs': [{
- 'ip_address': u'10.0.3.21',
+ 'ip_address': u'10.0.3.21/8',
}],
'name': utils.PhysName(stack.name, 'port'),
'admin_state_up': True,
diff --git a/heat/tests/openstack/neutron/test_neutron_security_group_rule.py b/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
index 5e8a5e43f..407f5e9ce 100644
--- a/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
+++ b/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
@@ -68,7 +68,7 @@ class SecurityGroupRuleTest(common.HeatTestCase):
return_value=(True, None))
tmpl = inline_templates.SECURITY_GROUP_RULE_TEMPLATE
- tmpl += ' remote_ip_prefix: "123"'
+ tmpl += ' remote_ip_prefix: "10.0.0.0/8"'
self._create_stack(tmpl=tmpl)
self.assertRaises(exception.ResourcePropertyConflict,
diff --git a/heat/tests/openstack/zun/test_container.py b/heat/tests/openstack/zun/test_container.py
index f4be7b3d3..ca0d4fe28 100644
--- a/heat/tests/openstack/zun/test_container.py
+++ b/heat/tests/openstack/zun/test_container.py
@@ -20,6 +20,8 @@ from zunclient import exceptions as zc_exc
from heat.common import exception
from heat.common import template_format
+from heat.engine.clients.os import neutron
+from heat.engine.clients.os import zun
from heat.engine.resources.openstack.zun import container
from heat.engine import scheduler
from heat.engine import template
@@ -57,9 +59,37 @@ resources:
mount_path: /data
- volume_id: 6ec29ba3-bf2c-4276-a88e-3670ea5abc80
mount_path: /data2
+ networks:
+ - network: mynet
+ fixed_ip: 10.0.0.4
+ - network: mynet2
+ fixed_ip: fe80::3
+ - port: myport
+'''
+
+zun_template_minimum = '''
+heat_template_version: 2017-09-01
+
+resources:
+ test_container:
+ type: OS::Zun::Container
+ properties:
+ name: test_container
+ image: "cirros:latest"
'''
+def create_fake_iface(port=None, net=None, mac=None, ip=None, subnet=None):
+ class fake_interface(object):
+ def __init__(self, port_id, net_id, mac_addr, fixed_ip, subnet_id):
+ self.port_id = port_id
+ self.net_id = net_id
+ self.mac_addr = mac_addr
+ self.fixed_ips = [{'ip_address': fixed_ip, 'subnet_id': subnet_id}]
+
+ return fake_interface(port, net, mac, ip, subnet)
+
+
class ZunContainerTest(common.HeatTestCase):
def setUp(self):
@@ -90,10 +120,18 @@ class ZunContainerTest(common.HeatTestCase):
{'size': 1, 'destination': '/data'},
{'source': '6ec29ba3-bf2c-4276-a88e-3670ea5abc80',
'destination': '/data2'}]
+ self.fake_networks = [
+ {'network': 'mynet', 'port': None, 'fixed_ip': '10.0.0.4'},
+ {'network': 'mynet2', 'port': None, 'fixed_ip': 'fe80::3'},
+ {'network': None, 'port': 'myport', 'fixed_ip': None}]
+ self.fake_networks_args = [
+ {'network': 'mynet', 'v4-fixed-ip': '10.0.0.4'},
+ {'network': 'mynet2', 'v6-fixed-ip': 'fe80::3'},
+ {'port': 'myport'}]
self.fake_network_id = '9c11d847-99ce-4a83-82da-9827362a68e8'
self.fake_network_name = 'private'
- self.fake_networks = {
+ self.fake_networks_attr = {
'networks': [
{
'id': self.fake_network_id,
@@ -125,6 +163,23 @@ class ZunContainerTest(common.HeatTestCase):
self.patchobject(container.Container, 'neutron',
return_value=self.neutron_client)
self.stub_VolumeConstraint_validate()
+ self.mock_update = self.patchobject(zun.ZunClientPlugin,
+ 'update_container')
+ self.stub_PortConstraint_validate()
+ self.mock_find = self.patchobject(
+ neutron.NeutronClientPlugin,
+ 'find_resourceid_by_name_or_id',
+ side_effect=lambda x, y: y)
+ self.mock_attach = self.patchobject(zun.ZunClientPlugin,
+ 'network_attach')
+ self.mock_detach = self.patchobject(zun.ZunClientPlugin,
+ 'network_detach')
+ self.mock_attach_check = self.patchobject(zun.ZunClientPlugin,
+ 'check_network_attach',
+ return_value=True)
+ self.mock_detach_check = self.patchobject(zun.ZunClientPlugin,
+ 'check_network_detach',
+ return_value=True)
def _mock_get_client(self):
value = mock.MagicMock()
@@ -208,6 +263,9 @@ class ZunContainerTest(common.HeatTestCase):
self.assertEqual(
self.fake_mounts,
c.properties.get(container.Container.MOUNTS))
+ self.assertEqual(
+ self.fake_networks,
+ c.properties.get(container.Container.NETWORKS))
scheduler.TaskRunner(c.create)()
self.assertEqual(self.resource_id, c.resource_id)
@@ -230,6 +288,7 @@ class ZunContainerTest(common.HeatTestCase):
hostname=self.fake_hostname,
security_groups=self.fake_security_groups,
mounts=self.fake_mounts_args,
+ nets=self.fake_networks_args,
)
def test_container_create_failed(self):
@@ -263,11 +322,134 @@ class ZunContainerTest(common.HeatTestCase):
rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
new_c = rsrc_defns[self.fake_name]
scheduler.TaskRunner(c.update, new_c)()
- self.client.containers.update.assert_called_once_with(
- self.resource_id, cpu=10, memory=10)
- self.client.containers.rename.assert_called_once_with(
- self.resource_id, name='fake-container')
+ self.mock_update.assert_called_once_with(
+ self.resource_id,
+ cpu=10, memory=10, name='fake-container')
+ self.assertEqual((c.UPDATE, c.COMPLETE), c.state)
+
+ def _test_container_update_None_networks(self, new_networks):
+ t = template_format.parse(zun_template_minimum)
+ stack = utils.parse_stack(t)
+ resource_defns = stack.t.resource_definitions(stack)
+ rsrc_defn = resource_defns[self.fake_name]
+ c = self._create_resource('container', rsrc_defn, stack)
+ scheduler.TaskRunner(c.create)()
+
+ new_t = copy.deepcopy(t)
+ new_t['resources'][self.fake_name]['properties']['networks'] = \
+ new_networks
+ rsrc_defns = template.Template(new_t).resource_definitions(stack)
+ new_c = rsrc_defns[self.fake_name]
+ iface = create_fake_iface(
+ port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ net='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ ip='1.2.3.4')
+ self.client.containers.network_list.return_value = [iface]
+ scheduler.TaskRunner(c.update, new_c)()
+ self.assertEqual((c.UPDATE, c.COMPLETE), c.state)
+ self.client.containers.network_list.assert_called_once_with(
+ self.resource_id)
+
+ def test_container_update_None_networks_with_port(self):
+ new_networks = [{'port': '2a60cbaa-3d33-4af6-a9ce-83594ac546fc'}]
+ self._test_container_update_None_networks(new_networks)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(1, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+ self.assertEqual(1, self.mock_detach_check.call_count)
+
+ def test_container_update_None_networks_with_network_id(self):
+ new_networks = [{'network': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ 'fixed_ip': '1.2.3.4'}]
+ self._test_container_update_None_networks(new_networks)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(1, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+ self.assertEqual(1, self.mock_detach_check.call_count)
+
+ def test_container_update_None_networks_with_complex_parameters(self):
+ new_networks = [{'network': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ 'fixed_ip': '1.2.3.4',
+ 'port': '2a60cbaa-3d33-4af6-a9ce-83594ac546fc'}]
+ self._test_container_update_None_networks(new_networks)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(1, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+ self.assertEqual(1, self.mock_detach_check.call_count)
+
+ def test_container_update_empty_networks_to_None(self):
+ new_networks = None
+ self._test_container_update_None_networks(new_networks)
+ self.assertEqual(0, self.mock_attach.call_count)
+ self.assertEqual(0, self.mock_detach.call_count)
+ self.assertEqual(0, self.mock_attach_check.call_count)
+ self.assertEqual(0, self.mock_detach_check.call_count)
+
+ def _test_container_update_networks(self, new_networks):
+ c = self._create_resource('container', self.rsrc_defn, self.stack)
+ scheduler.TaskRunner(c.create)()
+ t = template_format.parse(zun_template)
+ new_t = copy.deepcopy(t)
+ new_t['resources'][self.fake_name]['properties']['networks'] = \
+ new_networks
+ rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
+ new_c = rsrc_defns[self.fake_name]
+ sec_uuids = ['86c0f8ae-23a8-464f-8603-c54113ef5467']
+ self.patchobject(neutron.NeutronClientPlugin,
+ 'get_secgroup_uuids', return_value=sec_uuids)
+ ifaces = [
+ create_fake_iface(port='95e25541-d26a-478d-8f36-ae1c8f6b74dc',
+ net='mynet',
+ ip='10.0.0.4'),
+ create_fake_iface(port='450abbc9-9b6d-4d6f-8c3a-c47ac34100ef',
+ net='mynet2',
+ ip='fe80::3'),
+ create_fake_iface(port='myport',
+ net='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ ip='21.22.23.24')]
+ self.client.containers.network_list.return_value = ifaces
+ scheduler.TaskRunner(c.update, new_c)()
self.assertEqual((c.UPDATE, c.COMPLETE), c.state)
+ self.client.containers.network_list.assert_called_once_with(
+ self.resource_id)
+
+ def test_container_update_networks_with_complex_parameters(self):
+ new_networks = [
+ {'network': 'mynet',
+ 'fixed_ip': '10.0.0.4'},
+ {'port': '2a60cbaa-3d33-4af6-a9ce-83594ac546fc'}]
+ self._test_container_update_networks(new_networks)
+ self.assertEqual(2, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(2, self.mock_detach_check.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+
+ def test_container_update_networks_with_None(self):
+ new_networks = None
+ self._test_container_update_networks(new_networks)
+ self.assertEqual(3, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(3, self.mock_detach_check.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+
+ def test_container_update_old_networks_to_empty_list(self):
+ new_networks = []
+ self._test_container_update_networks(new_networks)
+ self.assertEqual(3, self.mock_detach.call_count)
+ self.assertEqual(1, self.mock_attach.call_count)
+ self.assertEqual(3, self.mock_detach_check.call_count)
+ self.assertEqual(1, self.mock_attach_check.call_count)
+
+ def test_container_update_remove_network_non_empty(self):
+ new_networks = [
+ {'network': 'mynet',
+ 'fixed_ip': '10.0.0.4'},
+ {'port': 'myport'}]
+ self._test_container_update_networks(new_networks)
+ self.assertEqual(1, self.mock_detach.call_count)
+ self.assertEqual(0, self.mock_attach.call_count)
+ self.assertEqual(1, self.mock_detach_check.call_count)
+ self.assertEqual(0, self.mock_attach_check.call_count)
def test_container_delete(self):
c = self._create_resource('container', self.rsrc_defn, self.stack)
@@ -304,7 +486,8 @@ class ZunContainerTest(common.HeatTestCase):
}, reality)
def test_resolve_attributes(self):
- self.neutron_client.list_networks.return_value = self.fake_networks
+ self.neutron_client.list_networks.return_value = \
+ self.fake_networks_attr
c = self._create_resource('container', self.rsrc_defn, self.stack)
scheduler.TaskRunner(c.create)()
self._mock_get_client()
diff --git a/heat/tests/test_convg_stack.py b/heat/tests/test_convg_stack.py
index df24fb2e9..8e6c5d87f 100644
--- a/heat/tests/test_convg_stack.py
+++ b/heat/tests/test_convg_stack.py
@@ -21,6 +21,7 @@ from heat.engine import stack as parser
from heat.engine import template as templatem
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
+from heat.objects import snapshot as snapshot_objects
from heat.objects import stack as stack_object
from heat.objects import sync_point as sync_point_object
from heat.rpc import worker_client
@@ -509,6 +510,37 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
self.assertTrue(mock_syncpoint_del.called)
self.assertTrue(mock_ccu.called)
+ def test_snapshot_delete(self, mock_cr):
+ tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
+ 'Resources': {'R1': {'Type': 'GenericResourceType'}}}
+ stack = parser.Stack(utils.dummy_context(), 'updated_time_test',
+ templatem.Template(tmpl))
+ stack.current_traversal = 'prev_traversal'
+ stack.action, stack.status = stack.CREATE, stack.COMPLETE
+ stack.store()
+ stack.thread_group_mgr = tools.DummyThreadGroupManager()
+ snapshot_values = {
+ 'stack_id': stack.id,
+ 'name': 'fake_snapshot',
+ 'tenant': stack.context.tenant_id,
+ 'status': 'COMPLETE',
+ 'data': None
+ }
+ snapshot_objects.Snapshot.create(stack.context, snapshot_values)
+
+ # Ensure that snapshot is not deleted on stack update
+ stack.converge_stack(template=stack.t, action=stack.UPDATE)
+ db_snapshot_obj = snapshot_objects.Snapshot.get_all(
+ stack.context, stack.id)
+ self.assertEqual('fake_snapshot', db_snapshot_obj[0].name)
+ self.assertEqual(stack.id, db_snapshot_obj[0].stack_id)
+
+ # Ensure that snapshot is deleted on stack delete
+ stack.converge_stack(template=stack.t, action=stack.DELETE)
+ self.assertEqual([], snapshot_objects.Snapshot.get_all(
+ stack.context, stack.id))
+ self.assertTrue(mock_cr.called)
+
@mock.patch.object(parser.Stack, '_persist_state')
class TestConvgStackStateSet(common.HeatTestCase):
diff --git a/heat/tests/test_engine_service.py b/heat/tests/test_engine_service.py
index 809999aeb..671c65a2c 100644
--- a/heat/tests/test_engine_service.py
+++ b/heat/tests/test_engine_service.py
@@ -1324,13 +1324,15 @@ class StackServiceTest(common.HeatTestCase):
# get parameters from adopt stack data which doesn't have it.
args = {"adopt_stack_data": '''{}'''}
self.eng._parse_template_and_validate_stack(
- self.ctx, 'stack_name', template, {}, {}, None, args)
+ self.ctx, 'stack_name', template, {}, {},
+ None, None, args)
args = {"adopt_stack_data": '''{
"environment": {}
}'''}
self.eng._parse_template_and_validate_stack(
- self.ctx, 'stack_name', template, {}, {}, None, args)
+ self.ctx, 'stack_name', template, {}, {},
+ None, None, args)
def test_parse_adopt_stack_data_with_parameters(self):
cfg.CONF.set_override('enable_stack_adopt', True)
@@ -1355,7 +1357,8 @@ class StackServiceTest(common.HeatTestCase):
}
}}'''}
stack = self.eng._parse_template_and_validate_stack(
- self.ctx, 'stack_name', template, {}, {}, None, args)
+ self.ctx, 'stack_name', template, {}, {},
+ None, None, args)
self.assertEqual(1, stack.parameters['volsize'])
@mock.patch('heat.engine.service.ThreadGroupManager',
diff --git a/heat/tests/test_rpc_client.py b/heat/tests/test_rpc_client.py
index 7aa7fc351..7bf414216 100644
--- a/heat/tests/test_rpc_client.py
+++ b/heat/tests/test_rpc_client.py
@@ -165,6 +165,7 @@ class EngineRpcAPITestCase(common.HeatTestCase):
params={u'InstanceType': u'm1.xlarge'},
files={u'a_file': u'the contents'},
environment_files=['foo.yaml'],
+ files_container=None,
args={'timeout_mins': u'30'})
def test_create_stack(self):
@@ -173,6 +174,7 @@ class EngineRpcAPITestCase(common.HeatTestCase):
params={u'InstanceType': u'm1.xlarge'},
files={u'a_file': u'the contents'},
environment_files=['foo.yaml'],
+ files_container=None,
args={'timeout_mins': u'30'})
call_kwargs = copy.deepcopy(kwargs)
call_kwargs['owner_id'] = None
@@ -191,6 +193,7 @@ class EngineRpcAPITestCase(common.HeatTestCase):
params={u'InstanceType': u'm1.xlarge'},
files={},
environment_files=['foo.yaml'],
+ files_container=None,
args=mock.ANY)
call_kwargs = copy.deepcopy(kwargs)
call_kwargs['template_id'] = None
@@ -206,6 +209,7 @@ class EngineRpcAPITestCase(common.HeatTestCase):
params={u'InstanceType': u'm1.xlarge'},
files={},
environment_files=['foo.yaml'],
+ files_container=None,
args=mock.ANY)
def test_get_template(self):
@@ -226,6 +230,7 @@ class EngineRpcAPITestCase(common.HeatTestCase):
params={u'Egg': u'spam'},
files=None,
environment_files=['foo.yaml'],
+ files_container=None,
ignorable_errors=None,
show_nested=False,
version='1.24')
diff --git a/lower-constraints.txt b/lower-constraints.txt
index f7141f463..97b759c52 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -56,10 +56,10 @@ msgpack==0.5.6
munch==2.2.0
netaddr==0.7.18
netifaces==0.10.6
+neutron-lib==1.14.0
openstacksdk==0.11.2
os-client-config==1.29.0
os-service-types==1.2.0
-os-testr==1.0.0
osc-lib==1.10.0
oslo.cache==1.26.0
oslo.concurrency==3.26.0
@@ -101,6 +101,7 @@ pyOpenSSL==17.5.0
pyparsing==2.2.0
pyperclip==1.6.0
python-barbicanclient==4.5.2
+python-blazarclient==1.0.0
python-ceilometerclient==2.5.0
python-cinderclient==3.3.0
python-dateutil==2.7.0
@@ -123,10 +124,9 @@ python-subunit==1.2.0
python-swiftclient==3.2.0
python-troveclient==2.2.0
python-zaqarclient==1.0.0
-python-zunclient==1.3.0
+python-zunclient==2.0.0
pytz==2013.6
PyYAML==3.12
-qpid-python==0.26;python_version=='2.7' # Apache-2.0
repoze.lru==0.7
requests==2.14.2
requestsexceptions==1.4.0
@@ -144,7 +144,6 @@ stevedore==1.20.0
tempest==17.1.0
Tempita==0.5.2
tenacity==4.4.0
-testrepository==0.0.18
testresources==2.0.0
testscenarios==0.4
testtools==2.2.0
diff --git a/releasenotes/notes/add-blazar-client-plugin-e2077e8646ca5f1a.yaml b/releasenotes/notes/add-blazar-client-plugin-e2077e8646ca5f1a.yaml
new file mode 100644
index 000000000..aafae4672
--- /dev/null
+++ b/releasenotes/notes/add-blazar-client-plugin-e2077e8646ca5f1a.yaml
@@ -0,0 +1,4 @@
+---
+other:
+ - |
+ Introduce a Blazar client plugin module that will be used by Blazar resources.
diff --git a/releasenotes/notes/add-networks-properties-to-container-resource-b1936d7e113aafbc.yaml b/releasenotes/notes/add-networks-properties-to-container-resource-b1936d7e113aafbc.yaml
new file mode 100644
index 000000000..cce46e0a3
--- /dev/null
+++ b/releasenotes/notes/add-networks-properties-to-container-resource-b1936d7e113aafbc.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add a new ``networks`` property to the ``OS::Zun::Container`` resource.
+ This property is an ordered list of NICs to be attached to the container,
+ with information about the connected networks, fixed IPs, and ports.
+ This property can be updated without replacement.
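
A brief illustrative sketch (not part of this change) of how the new property might appear in a template, written here as a Python template string in the same style as the test fixtures above; the network and port names are placeholders:

container_with_networks = '''
heat_template_version: 2017-09-01

resources:
  web_container:
    type: OS::Zun::Container
    properties:
      name: web
      image: "cirros:latest"
      networks:
        # attach by network name or ID, optionally pinning a fixed IP
        - network: private
          fixed_ip: 10.0.0.10
        # or attach a pre-created Neutron port
        - port: my-port
'''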
diff --git a/releasenotes/notes/convergence-delete-race-5b821bbd4c5ba5dc.yaml b/releasenotes/notes/convergence-delete-race-5b821bbd4c5ba5dc.yaml
new file mode 100644
index 000000000..f06a90619
--- /dev/null
+++ b/releasenotes/notes/convergence-delete-race-5b821bbd4c5ba5dc.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Previously, when deleting a convergence stack, the API call returned
+ immediately, so a client that queried the stack's status right away could
+ see the previous operation still in progress, or failed, and mistake that
+ for the current status. (This included
+ Heat itself when acting as a client for a nested stack.) Convergence stacks
+ are now guaranteed to have moved to the ``DELETE_IN_PROGRESS`` state before
+ the delete API call returns, so any subsequent polling will reflect
+ up-to-date information.
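
A minimal polling sketch of the new guarantee, assuming ``heat`` is an authenticated python-heatclient Client and ``stack_id`` names an existing convergence stack:

import time

from heatclient import exc

heat.stacks.delete(stack_id)
while True:
    try:
        stack = heat.stacks.get(stack_id)
    except exc.HTTPNotFound:
        break  # deletion finished and the stack record is gone
    # With this fix the first observed status is already DELETE_IN_PROGRESS
    # (or a later DELETE_* state), never the previous operation's state.
    if stack.stack_status == 'DELETE_FAILED':
        break
    time.sleep(2)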
diff --git a/releasenotes/notes/hidden-resources-529934d5eb519c86.yaml b/releasenotes/notes/hidden-resources-529934d5eb519c86.yaml
new file mode 100644
index 000000000..dbc2afd48
--- /dev/null
+++ b/releasenotes/notes/hidden-resources-529934d5eb519c86.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - Resource type ``OS::Magnum::Bay`` is now hidden; please use
+ ``OS::Magnum::Cluster`` instead.
+ - Resource type ``OS::Magnum::BayModel`` is now hidden; please use
+ ``OS::Magnum::ClusterTemplate`` instead.
+ - Resource type ``OS::Nova::FloatingIP`` is now hidden; please use
+ ``OS::Neutron::FloatingIP`` instead.
+ - Resource type ``OS::Nova::FloatingIPAssociation`` is now hidden; please use
+ ``OS::Neutron::FloatingIPAssociation`` instead.
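
A hedged template sketch of the intended migration for the Nova floating-IP resources, again as a Python template string; the network and port values are placeholders, and the property names should be checked against the current resource reference:

neutron_fip_template = '''
heat_template_version: 2017-09-01

resources:
  # was: type: OS::Nova::FloatingIP
  fip:
    type: OS::Neutron::FloatingIP
    properties:
      floating_network: public

  # was: type: OS::Nova::FloatingIPAssociation
  fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_resource: fip }
      port_id: my-server-port
'''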
diff --git a/releasenotes/notes/legacy-client-races-ba7a60cef5ec1694.yaml b/releasenotes/notes/legacy-client-races-ba7a60cef5ec1694.yaml
new file mode 100644
index 000000000..6371f2c09
--- /dev/null
+++ b/releasenotes/notes/legacy-client-races-ba7a60cef5ec1694.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Previously, the suspend, resume, and check API calls for all stacks, and
+ the update, restore, and delete API calls for non-convergence stacks,
+ returned immediately after starting the stack operation. This meant that a
+ client reading the state immediately, or performing the same operation twice
+ in a row, could misinterpret a previous state as the latest unless it made
+ careful reference to the ``updated_at`` timestamp.
+ Stacks are now guaranteed to have moved to the ``IN_PROGRESS`` state before
+ any of these APIs return (except in the case of deleting a non-convergence
+ stack where another operation was already in progress).
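
A minimal sketch of the guarantee, assuming ``heat`` is an authenticated python-heatclient Client and ``stack_id`` names an existing stack:

# With this fix the very first read after suspend() already reports an
# in-progress (or later) suspend state, so no updated_at comparison is needed.
heat.actions.suspend(stack_id)
stack = heat.stacks.get(stack_id)
assert stack.stack_status.startswith('SUSPEND_')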
diff --git a/releasenotes/notes/migrate-to-storyboard-36c90857d3167322.yaml b/releasenotes/notes/migrate-to-storyboard-36c90857d3167322.yaml
new file mode 100644
index 000000000..f022bab79
--- /dev/null
+++ b/releasenotes/notes/migrate-to-storyboard-36c90857d3167322.yaml
@@ -0,0 +1,10 @@
+---
+prelude: |
+ Heat's bug and blueprint reports have migrated from Launchpad to
+ `storyboard`_. If you would like to create a new story (a bug or a
+ blueprint), please file it under the `Heat project`_. This change
+ applies to `all heat projects/repos`_.
+
+ .. _storyboard: https://storyboard.openstack.org/#!/project_group/82
+ .. _Heat project: https://storyboard.openstack.org/#!/project/989
+ .. _all heat projects/repos: https://storyboard.openstack.org/#!/project_group/82
diff --git a/releasenotes/notes/support-files-from-swift-f27fd2d788d67580.yaml b/releasenotes/notes/support-files-from-swift-f27fd2d788d67580.yaml
new file mode 100644
index 000000000..3eeec062a
--- /dev/null
+++ b/releasenotes/notes/support-files-from-swift-f27fd2d788d67580.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Added stack API support for specifying a Swift container
+ that holds the child templates and environment files.
+ All files in the container are fetched and used where required,
+ unless they are superseded by entries in the files map.
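
A hedged sketch of a stack-create request that uses the new field; the endpoint, token, template, and container name are placeholders, and only the ``files_container`` key itself comes from this change:

import requests

body = {
    'stack_name': 'demo',
    'template': parent_template,        # placeholder: the parent HOT document
    'files_container': 'my-templates',  # Swift container with the child files
    'parameters': {},
}
# heat_endpoint and token are placeholders for a real Heat API endpoint
# (e.g. .../v1/<tenant_id>) and a valid Keystone token.
resp = requests.post(heat_endpoint + '/stacks',
                     headers={'X-Auth-Token': token},
                     json=body)
resp.raise_for_status()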
diff --git a/requirements.txt b/requirements.txt
index 8c5d4fc4d..af7fb54a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,6 +11,7 @@ keystoneauth1>=3.4.0 # Apache-2.0
keystonemiddleware>=4.17.0 # Apache-2.0
lxml!=3.7.0,>=3.4.1 # BSD
netaddr>=0.7.18 # BSD
+neutron-lib>=1.14.0 # Apache-2.0
openstacksdk>=0.11.2 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
@@ -31,6 +32,7 @@ oslo.versionedobjects>=1.31.2 # Apache-2.0
PasteDeploy>=1.5.0 # MIT
aodhclient>=0.9.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
+python-blazarclient>=1.0.0 # Apache-2.0
python-ceilometerclient>=2.5.0 # Apache-2.0
python-cinderclient>=3.3.0 # Apache-2.0
python-designateclient>=2.7.0 # Apache-2.0
@@ -49,7 +51,7 @@ python-saharaclient>=1.4.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
python-troveclient>=2.2.0 # Apache-2.0
python-zaqarclient>=1.0.0 # Apache-2.0
-python-zunclient>=1.3.0 # Apache-2.0
+python-zunclient>=2.0.0 # Apache-2.0
pytz>=2013.6 # MIT
PyYAML>=3.12 # MIT
requests>=2.14.2 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 77d9e05d7..4cdd0b5db 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -65,6 +65,7 @@ oslo.policy.policies =
heat.clients =
aodh = heat.engine.clients.os.aodh:AodhClientPlugin
barbican = heat.engine.clients.os.barbican:BarbicanClientPlugin
+ blazar = heat.engine.clients.os.blazar:BlazarClientPlugin
ceilometer = heat.engine.clients.os.ceilometer:CeilometerClientPlugin
cinder = heat.engine.clients.os.cinder:CinderClientPlugin
designate = heat.engine.clients.os.designate:DesignateClientPlugin
diff --git a/test-requirements.txt b/test-requirements.txt
index 07d544c27..4500a40b8 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -10,11 +10,9 @@ fixtures>=3.0.0 # Apache-2.0/BSD
kombu!=4.0.2,>=4.0.0 # BSD
mock>=2.0.0 # BSD
PyMySQL>=0.7.6 # MIT License
-os-testr>=1.0.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
-qpid-python>=0.26;python_version=='2.7' # Apache-2.0
psycopg2>=2.6.2 # LGPL/ZPL
-testrepository>=0.0.18 # Apache-2.0/BSD
+stestr>=2.0.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
diff --git a/tools/dashboards/heat.dash b/tools/dashboards/heat.dash
index 57725c70a..a272b987d 100644
--- a/tools/dashboards/heat.dash
+++ b/tools/dashboards/heat.dash
@@ -11,11 +11,8 @@ foreach = (project:openstack/heat OR project:openstack/heat-agents OR
[section "Heat Specs"]
query = project:openstack/heat-specs
-[section "Bug Fixes"]
-query = intopic:^bug/.*
-
-[section "Blueprints"]
-query = message:"Blueprint"
+[section "Stories & Bug Fixes"]
+query = (intopic: ^story/.* OR intopic: ^bug/.* OR intopic: ^bp/.*)
[section "Needs Feedback (Changes older than 5 days that have not been reviewed by anyone)"]
query = NOT label:Code-Review>=1 NOT label:Code-Review<=-1 age:5d