summaryrefslogtreecommitdiff
path: root/contrib
diff options
context:
space:
mode:
Diffstat (limited to 'contrib')
-rw-r--r--contrib/heat_docker/heat_docker/resources/docker_container.py3
-rw-r--r--contrib/rackspace/heat_keystoneclient_v2/client.py4
-rw-r--r--contrib/rackspace/rackspace/clients.py2
-rw-r--r--contrib/rackspace/rackspace/resources/auto_scale.py180
-rw-r--r--contrib/rackspace/rackspace/resources/cloud_loadbalancer.py3
-rw-r--r--contrib/rackspace/rackspace/resources/cloud_server.py67
-rw-r--r--contrib/rackspace/rackspace/resources/cloudnetworks.py8
-rw-r--r--contrib/rackspace/rackspace/resources/lb_node.py230
-rw-r--r--contrib/rackspace/rackspace/tests/test_auto_scale.py575
-rw-r--r--contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py145
-rw-r--r--contrib/rackspace/rackspace/tests/test_lb_node.py305
-rw-r--r--contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py86
-rw-r--r--contrib/rackspace/requirements.txt2
13 files changed, 1483 insertions, 127 deletions
diff --git a/contrib/heat_docker/heat_docker/resources/docker_container.py b/contrib/heat_docker/heat_docker/resources/docker_container.py
index 9a9d52112..44144e772 100644
--- a/contrib/heat_docker/heat_docker/resources/docker_container.py
+++ b/contrib/heat_docker/heat_docker/resources/docker_container.py
@@ -554,7 +554,8 @@ def available_resource_mapping():
if DOCKER_INSTALLED:
return resource_mapping()
else:
- LOG.warn(_LW("Docker plug-in loaded, but docker lib not installed."))
+ LOG.warning(_LW("Docker plug-in loaded, but docker lib "
+ "not installed."))
return {}
diff --git a/contrib/rackspace/heat_keystoneclient_v2/client.py b/contrib/rackspace/heat_keystoneclient_v2/client.py
index aa23b9f75..66bc8d431 100644
--- a/contrib/rackspace/heat_keystoneclient_v2/client.py
+++ b/contrib/rackspace/heat_keystoneclient_v2/client.py
@@ -156,8 +156,8 @@ class KeystoneClientV2(object):
Returns the keystone ID of the resulting user
"""
if len(username) > 64:
- LOG.warn(_LW("Truncating the username %s to the last 64 "
- "characters."), username)
+ LOG.warning(_LW("Truncating the username %s to the last 64 "
+ "characters."), username)
# get the last 64 characters of the username
username = username[-64:]
user = self.client.users.create(username,
diff --git a/contrib/rackspace/rackspace/clients.py b/contrib/rackspace/rackspace/clients.py
index dd737b6ec..676d3c9e3 100644
--- a/contrib/rackspace/rackspace/clients.py
+++ b/contrib/rackspace/rackspace/clients.py
@@ -65,7 +65,7 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
tenant_id=tenant,
tenant_name=tenant_name)
if not self.pyrax.authenticated:
- LOG.warn(_LW("Pyrax Authentication Failed."))
+ LOG.warning(_LW("Pyrax Authentication Failed."))
raise exception.AuthorizationFailure()
LOG.info(_LI("User %s authenticated successfully."),
self.context.username)
diff --git a/contrib/rackspace/rackspace/resources/auto_scale.py b/contrib/rackspace/rackspace/resources/auto_scale.py
index d9a6b1eca..339a79b95 100644
--- a/contrib/rackspace/rackspace/resources/auto_scale.py
+++ b/contrib/rackspace/rackspace/resources/auto_scale.py
@@ -14,13 +14,17 @@
"""Resources for Rackspace Auto Scale."""
import copy
+import six
+from heat.common import exception
from heat.common.i18n import _
+from heat.common import template_format
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
+from heat.engine import template as templatem
try:
from pyrax.exceptions import Forbidden
@@ -73,9 +77,11 @@ class Group(resource.Resource):
_LAUNCH_CONFIG_ARGS_KEYS = (
LAUNCH_CONFIG_ARGS_LOAD_BALANCERS,
LAUNCH_CONFIG_ARGS_SERVER,
+ LAUNCH_CONFIG_ARGS_STACK,
) = (
'loadBalancers',
'server',
+ 'stack',
)
_LAUNCH_CONFIG_ARGS_LOAD_BALANCER_KEYS = (
@@ -114,12 +120,31 @@ class Group(resource.Resource):
'uuid',
)
+ _LAUNCH_CONFIG_ARGS_STACK_KEYS = (
+ LAUNCH_CONFIG_ARGS_STACK_TEMPLATE,
+ LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL,
+ LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK,
+ LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT,
+ LAUNCH_CONFIG_ARGS_STACK_FILES,
+ LAUNCH_CONFIG_ARGS_STACK_PARAMETERS,
+ LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS
+ ) = (
+ 'template',
+ 'template_url',
+ 'disable_rollback',
+ 'environment',
+ 'files',
+ 'parameters',
+ 'timeout_mins'
+ )
+
_launch_configuration_args_schema = {
LAUNCH_CONFIG_ARGS_LOAD_BALANCERS: properties.Schema(
properties.Schema.LIST,
_('List of load balancers to hook the '
'server up to. If not specified, no '
'load balancing will be configured.'),
+ default=[],
schema=properties.Schema(
properties.Schema.MAP,
schema={
@@ -130,8 +155,7 @@ class Group(resource.Resource):
),
LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT: properties.Schema(
properties.Schema.INTEGER,
- _('Server port to connect the load balancer to.'),
- required=True
+ _('Server port to connect the load balancer to.')
),
},
)
@@ -140,6 +164,7 @@ class Group(resource.Resource):
properties.Schema.MAP,
_('Server creation arguments, as accepted by the Cloud Servers '
'server creation API.'),
+ required=False,
schema={
LAUNCH_CONFIG_ARGS_SERVER_NAME: properties.Schema(
properties.Schema.STRING,
@@ -210,8 +235,49 @@ class Group(resource.Resource):
'key-based authentication to the server.')
),
},
- required=True
),
+ LAUNCH_CONFIG_ARGS_STACK: properties.Schema(
+ properties.Schema.MAP,
+ _('The attributes that Auto Scale uses to create a new stack. The '
+ 'attributes that you specify for the stack entity apply to all '
+ 'new stacks in the scaling group. Note the stack arguments are '
+ 'directly passed to Heat when creating a stack.'),
+ schema={
+ LAUNCH_CONFIG_ARGS_STACK_TEMPLATE: properties.Schema(
+ properties.Schema.STRING,
+ _('The template that describes the stack. Either the '
+ 'template or template_url property must be specified.'),
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL: properties.Schema(
+ properties.Schema.STRING,
+ _('A URI to a template. Either the template or '
+ 'template_url property must be specified.')
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Keep the resources that have been created if the stack '
+ 'fails to create. Defaults to True.'),
+ default=True
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT: properties.Schema(
+ properties.Schema.MAP,
+ _('The environment for the stack.'),
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_FILES: properties.Schema(
+ properties.Schema.MAP,
+ _('The contents of files that the template references.')
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_PARAMETERS: properties.Schema(
+ properties.Schema.MAP,
+ _('Key/value pairs of the parameters and their values to '
+ 'pass to the parameters in the template.')
+ ),
+ LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS: properties.Schema(
+ properties.Schema.INTEGER,
+ _('The stack creation timeout in minutes.')
+ )
+ }
+ )
}
properties_schema = {
@@ -255,17 +321,18 @@ class Group(resource.Resource):
schema={
LAUNCH_CONFIG_ARGS: properties.Schema(
properties.Schema.MAP,
- _('Type-specific server launching arguments.'),
+ _('Type-specific launch arguments.'),
schema=_launch_configuration_args_schema,
required=True
),
LAUNCH_CONFIG_TYPE: properties.Schema(
properties.Schema.STRING,
- _('Launch configuration method. Only launch_server '
- 'is currently supported.'),
+ _('Launch configuration method. Only launch_server and '
+ 'launch_stack are currently supported.'),
required=True,
constraints=[
- constraints.AllowedValues(['launch_server']),
+ constraints.AllowedValues(['launch_server',
+ 'launch_stack']),
]
),
},
@@ -286,16 +353,19 @@ class Group(resource.Resource):
max_entities=groupconf[self.GROUP_CONFIGURATION_MAX_ENTITIES],
metadata=groupconf.get(self.GROUP_CONFIGURATION_METADATA, None))
- def _get_launch_config_args(self, launchconf):
- """Get the launchConfiguration-related pyrax arguments."""
+ def _get_launch_config_server_args(self, launchconf):
lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
server_args = lcargs[self.LAUNCH_CONFIG_ARGS_SERVER]
lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
lbs = copy.deepcopy(lb_args)
- if lbs:
- for lb in lbs:
- lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
- lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
+ for lb in lbs:
+ # if the port is not specified, the lbid must be that of a
+ # RackConnectV3 lb pool.
+ if not lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]:
+ del lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT]
+ continue
+ lbid = int(lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID])
+ lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID] = lbid
personality = server_args.get(
self.LAUNCH_CONFIG_ARGS_SERVER_PERSONALITY)
if personality:
@@ -304,9 +374,9 @@ class Group(resource.Resource):
user_data = server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_USER_DATA)
cdrive = (server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_CDRIVE) or
bool(user_data is not None and len(user_data.strip())))
- image_id = self.client_plugin('glance').get_image_id(
+ image_id = self.client_plugin('glance').find_image_by_name_or_id(
server_args[self.LAUNCH_CONFIG_ARGS_SERVER_IMAGE_REF])
- flavor_id = self.client_plugin('nova').get_flavor_id(
+ flavor_id = self.client_plugin('nova').find_flavor_by_name_or_id(
server_args[self.LAUNCH_CONFIG_ARGS_SERVER_FLAVOR_REF])
return dict(
@@ -325,6 +395,30 @@ class Group(resource.Resource):
key_name=server_args.get(self.LAUNCH_CONFIG_ARGS_SERVER_KEY_NAME),
)
+ def _get_launch_config_stack_args(self, launchconf):
+ lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
+ stack_args = lcargs[self.LAUNCH_CONFIG_ARGS_STACK]
+ return dict(
+ launch_config_type=launchconf[self.LAUNCH_CONFIG_TYPE],
+ template=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE],
+ template_url=stack_args[
+ self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL],
+ disable_rollback=stack_args[
+ self.LAUNCH_CONFIG_ARGS_STACK_DISABLE_ROLLBACK],
+ environment=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT],
+ files=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_FILES],
+ parameters=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_PARAMETERS],
+ timeout_mins=stack_args[self.LAUNCH_CONFIG_ARGS_STACK_TIMEOUT_MINS]
+ )
+
+ def _get_launch_config_args(self, launchconf):
+ """Get the launchConfiguration-related pyrax arguments."""
+ if launchconf[self.LAUNCH_CONFIG_ARGS].get(
+ self.LAUNCH_CONFIG_ARGS_SERVER):
+ return self._get_launch_config_server_args(launchconf)
+ else:
+ return self._get_launch_config_stack_args(launchconf)
+
def _get_create_args(self):
"""Get pyrax-style arguments for creating a scaling group."""
args = self._get_group_config_args(
@@ -391,6 +485,62 @@ class Group(resource.Resource):
else:
return True
+ def _check_rackconnect_v3_pool_exists(self, pool_id):
+ pools = self.client("rackconnect").list_load_balancer_pools()
+ if pool_id in (p.id for p in pools):
+ return True
+ return False
+
+ def validate(self):
+ super(Group, self).validate()
+ launchconf = self.properties[self.LAUNCH_CONFIGURATION]
+ lcargs = launchconf[self.LAUNCH_CONFIG_ARGS]
+
+ server_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_SERVER)
+ st_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_STACK)
+
+ # launch_server and launch_stack are required and mutually exclusive.
+ if ((not server_args and not st_args) or
+ (server_args and st_args)):
+ msg = (_('Must provide one of %(server)s or %(stack)s in %(conf)s')
+ % {'server': self.LAUNCH_CONFIG_ARGS_SERVER,
+ 'stack': self.LAUNCH_CONFIG_ARGS_STACK,
+ 'conf': self.LAUNCH_CONFIGURATION})
+ raise exception.StackValidationFailed(msg)
+
+ lb_args = lcargs.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCERS)
+ lbs = copy.deepcopy(lb_args)
+ for lb in lbs:
+ lb_port = lb.get(self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_PORT)
+ lb_id = lb[self.LAUNCH_CONFIG_ARGS_LOAD_BALANCER_ID]
+ if not lb_port:
+ # check if lb id is a valid RCV3 pool id
+ if not self._check_rackconnect_v3_pool_exists(lb_id):
+ msg = _('Could not find RackConnectV3 pool '
+ 'with id %s') % (lb_id)
+ raise exception.StackValidationFailed(msg)
+
+ if st_args:
+ st_tmpl = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE)
+ st_tmpl_url = st_args.get(
+ self.LAUNCH_CONFIG_ARGS_STACK_TEMPLATE_URL)
+ st_env = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_ENVIRONMENT)
+ # template and template_url are required and mutually exclusive.
+ if ((not st_tmpl and not st_tmpl_url) or
+ (st_tmpl and st_tmpl_url)):
+ msg = _('Must provide one of template or template_url.')
+ raise exception.StackValidationFailed(msg)
+
+ if st_tmpl:
+ st_files = st_args.get(self.LAUNCH_CONFIG_ARGS_STACK_FILES)
+ try:
+ tmpl = template_format.simple_parse(st_tmpl)
+ templatem.Template(tmpl, files=st_files, env=st_env)
+ except Exception as exc:
+ msg = (_('Encountered error while loading template: %s') %
+ six.text_type(exc))
+ raise exception.StackValidationFailed(msg)
+
def auto_scale(self):
return self.client('auto_scale')
diff --git a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py b/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
index 2bf125028..b35642888 100644
--- a/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
+++ b/contrib/rackspace/rackspace/resources/cloud_loadbalancer.py
@@ -219,6 +219,7 @@ class CloudLoadBalancer(resource.Resource):
),
NODE_TYPE: properties.Schema(
properties.Schema.STRING,
+ default='PRIMARY',
constraints=[
constraints.AllowedValues(['PRIMARY',
'SECONDARY']),
@@ -226,6 +227,7 @@ class CloudLoadBalancer(resource.Resource):
),
NODE_WEIGHT: properties.Schema(
properties.Schema.NUMBER,
+ default=1,
constraints=[
constraints.Range(1, 100),
]
@@ -787,6 +789,7 @@ class CloudLoadBalancer(resource.Resource):
return False
def _access_list_needs_update(self, old, new):
+ old = [{key: al[key] for key in self._ACCESS_LIST_KEYS} for al in old]
old = set([frozenset(s.items()) for s in old])
new = set([frozenset(s.items()) for s in new])
return old != new
diff --git a/contrib/rackspace/rackspace/resources/cloud_server.py b/contrib/rackspace/rackspace/resources/cloud_server.py
index d6879d6bf..eb70d21a9 100644
--- a/contrib/rackspace/rackspace/resources/cloud_server.py
+++ b/contrib/rackspace/rackspace/resources/cloud_server.py
@@ -44,10 +44,10 @@ class CloudServer(server.Server):
status=support.UNSUPPORTED,
message=_('This resource is not supported, use at your own risk.'))
- # Managed Cloud automation statuses
- MC_STATUS_IN_PROGRESS = 'In Progress'
- MC_STATUS_COMPLETE = 'Complete'
- MC_STATUS_BUILD_ERROR = 'Build Error'
+ # Rackspace Cloud automation statuses
+ SM_STATUS_IN_PROGRESS = 'In Progress'
+ SM_STATUS_COMPLETE = 'Complete'
+ SM_STATUS_BUILD_ERROR = 'Build Error'
# RackConnect automation statuses
RC_STATUS_DEPLOYING = 'DEPLOYING'
@@ -60,17 +60,34 @@ class CloudServer(server.Server):
{
server.Server.USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
- _('How the user_data should be formatted for the server. For '
- 'HEAT_CFNTOOLS, the user_data is bundled as part of the '
- 'heat-cfntools cloud-init boot configuration data. For RAW '
- 'the user_data is passed to Nova unmodified. '
+ _('How the user_data should be formatted for the server. '
+ 'For RAW the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=server.Server.RAW,
constraints=[
- constraints.AllowedValues(
- server.Server._SOFTWARE_CONFIG_FORMATS),
+ constraints.AllowedValues([
+ server.Server.RAW, server.Server.SOFTWARE_CONFIG
+ ])
+ ]
+ ),
+ }
+ )
+ properties_schema.update(
+ {
+ server.Server.SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
+ properties.Schema.STRING,
+ _('How the server should receive the metadata required for '
+ 'software configuration. POLL_TEMP_URL is the only '
+ 'supported transport on Rackspace Cloud. This property is '
+ 'retained for compatibility.'),
+ default=server.Server.POLL_TEMP_URL,
+ update_allowed=True,
+ constraints=[
+ constraints.AllowedValues([
+ server.Server.POLL_TEMP_URL
+ ])
]
),
}
@@ -82,40 +99,42 @@ class CloudServer(server.Server):
self._rack_connect_started_event_sent = False
def _config_drive(self):
+ user_data_format = self.properties.get(self.USER_DATA_FORMAT, "")
+ is_sw_config = user_data_format == self.SOFTWARE_CONFIG
user_data = self.properties.get(self.USER_DATA)
config_drive = self.properties.get(self.CONFIG_DRIVE)
- if user_data or config_drive:
+ if config_drive or is_sw_config or user_data:
return True
else:
return False
- def _check_managed_cloud_complete(self, server):
+ def _check_rax_automation_complete(self, server):
if not self._managed_cloud_started_event_sent:
- msg = _("Waiting for Managed Cloud automation to complete")
+ msg = _("Waiting for Rackspace Cloud automation to complete")
self._add_event(self.action, self.status, msg)
self._managed_cloud_started_event_sent = True
if 'rax_service_level_automation' not in server.metadata:
- LOG.debug("Managed Cloud server does not have the "
+ LOG.debug("Cloud server does not have the "
"rax_service_level_automation metadata tag yet")
return False
mc_status = server.metadata['rax_service_level_automation']
- LOG.debug("Managed Cloud automation status: %s" % mc_status)
+ LOG.debug("Rackspace Cloud automation status: %s" % mc_status)
- if mc_status == self.MC_STATUS_IN_PROGRESS:
+ if mc_status == self.SM_STATUS_IN_PROGRESS:
return False
- elif mc_status == self.MC_STATUS_COMPLETE:
- msg = _("Managed Cloud automation has completed")
+ elif mc_status == self.SM_STATUS_COMPLETE:
+ msg = _("Rackspace Cloud automation has completed")
self._add_event(self.action, self.status, msg)
return True
- elif mc_status == self.MC_STATUS_BUILD_ERROR:
- raise exception.Error(_("Managed Cloud automation failed"))
+ elif mc_status == self.SM_STATUS_BUILD_ERROR:
+ raise exception.Error(_("Rackspace Cloud automation failed"))
else:
- raise exception.Error(_("Unknown Managed Cloud automation "
+ raise exception.Error(_("Unknown Rackspace Cloud automation "
"status: %s") % mc_status)
def _check_rack_connect_complete(self, server):
@@ -147,7 +166,8 @@ class CloudServer(server.Server):
reason = server.metadata.get('rackconnect_unprocessable_reason',
None)
if reason is not None:
- LOG.warn(_LW("RackConnect unprocessable reason: %s"), reason)
+ LOG.warning(_LW("RackConnect unprocessable reason: %s"),
+ reason)
msg = _("RackConnect automation has completed")
self._add_event(self.action, self.status, msg)
@@ -173,8 +193,7 @@ class CloudServer(server.Server):
self._check_rack_connect_complete(server)):
return False
- if ('rax_managed' in self.context.roles and not
- self._check_managed_cloud_complete(server)):
+ if not self._check_rax_automation_complete(server):
return False
return True
diff --git a/contrib/rackspace/rackspace/resources/cloudnetworks.py b/contrib/rackspace/rackspace/resources/cloudnetworks.py
index 1eb0a9d6d..8fd14479e 100644
--- a/contrib/rackspace/rackspace/resources/cloudnetworks.py
+++ b/contrib/rackspace/rackspace/resources/cloudnetworks.py
@@ -12,6 +12,7 @@
# under the License.
from oslo_log import log as logging
+import six
from heat.common.i18n import _
from heat.common.i18n import _LW
@@ -20,7 +21,6 @@ from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
-import six
try:
from pyrax.exceptions import NetworkInUse # noqa
@@ -108,8 +108,8 @@ class CloudNetwork(resource.Resource):
try:
self._network = self.cloud_networks().get(self.resource_id)
except NotFound:
- LOG.warn(_LW("Could not find network %s but resource id is"
- " set."), self.resource_id)
+ LOG.warning(_LW("Could not find network %s but resource id is"
+ " set."), self.resource_id)
return self._network
def cloud_networks(self):
@@ -139,7 +139,7 @@ class CloudNetwork(resource.Resource):
try:
network.delete()
except NetworkInUse:
- LOG.warn(_LW("Network '%s' still in use."), network.id)
+ LOG.warning(_LW("Network '%s' still in use."), network.id)
else:
self._delete_issued = True
return False
diff --git a/contrib/rackspace/rackspace/resources/lb_node.py b/contrib/rackspace/rackspace/resources/lb_node.py
new file mode 100644
index 000000000..d25f1febb
--- /dev/null
+++ b/contrib/rackspace/rackspace/resources/lb_node.py
@@ -0,0 +1,230 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_utils import timeutils
+import six
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+
+try:
+ from pyrax.exceptions import NotFound # noqa
+ PYRAX_INSTALLED = True
+except ImportError:
+ # Setup fake exception for testing without pyrax
+ class NotFound(Exception):
+ pass
+ PYRAX_INSTALLED = False
+
+
+def lb_immutable(exc):
+ return 'immutable' in six.text_type(exc)
+
+
+class LoadbalancerDeleted(exception.HeatException):
+ msg_fmt = _("The Load Balancer (ID %(lb_id)s) has been deleted.")
+
+
+class NodeNotFound(exception.HeatException):
+ msg_fmt = _("Node (ID %(node_id)s) not found on Load Balancer "
+ "(ID %(lb_id)s).")
+
+
+class LBNode(resource.Resource):
+ """Represents a single node of a Rackspace Cloud Load Balancer."""
+
+ default_client_name = 'cloud_lb'
+
+ _CONDITIONS = (
+ ENABLED, DISABLED, DRAINING,
+ ) = (
+ 'ENABLED', 'DISABLED', 'DRAINING',
+ )
+
+ _NODE_KEYS = (
+ ADDRESS, PORT, CONDITION, TYPE, WEIGHT
+ ) = (
+ 'address', 'port', 'condition', 'type', 'weight'
+ )
+
+ _OTHER_KEYS = (
+ LOAD_BALANCER, DRAINING_TIMEOUT
+ ) = (
+ 'load_balancer', 'draining_timeout'
+ )
+
+ PROPERTIES = _NODE_KEYS + _OTHER_KEYS
+
+ properties_schema = {
+ LOAD_BALANCER: properties.Schema(
+ properties.Schema.STRING,
+ _("The ID of the load balancer to associate the node with."),
+ required=True
+ ),
+ DRAINING_TIMEOUT: properties.Schema(
+ properties.Schema.INTEGER,
+ _("The time to wait, in seconds, for the node to drain before it "
+ "is deleted."),
+ default=0,
+ constraints=[
+ constraints.Range(min=0)
+ ],
+ update_allowed=True
+ ),
+ ADDRESS: properties.Schema(
+ properties.Schema.STRING,
+ _("IP address for the node."),
+ required=True
+ ),
+ PORT: properties.Schema(
+ properties.Schema.INTEGER,
+ required=True
+ ),
+ CONDITION: properties.Schema(
+ properties.Schema.STRING,
+ default=ENABLED,
+ constraints=[
+ constraints.AllowedValues(_CONDITIONS),
+ ],
+ update_allowed=True
+ ),
+ TYPE: properties.Schema(
+ properties.Schema.STRING,
+ constraints=[
+ constraints.AllowedValues(['PRIMARY',
+ 'SECONDARY']),
+ ],
+ update_allowed=True
+ ),
+ WEIGHT: properties.Schema(
+ properties.Schema.NUMBER,
+ constraints=[
+ constraints.Range(1, 100),
+ ],
+ update_allowed=True
+ ),
+ }
+
+ def lb(self):
+ lb_id = self.properties.get(self.LOAD_BALANCER)
+ lb = self.client().get(lb_id)
+
+ if lb.status in ('DELETED', 'PENDING_DELETE'):
+ raise LoadbalancerDeleted(lb_id=lb.id)
+
+ return lb
+
+ def node(self, lb):
+ for node in getattr(lb, 'nodes', []):
+ if node.id == self.resource_id:
+ return node
+ raise NodeNotFound(node_id=self.resource_id, lb_id=lb.id)
+
+ def handle_create(self):
+ pass
+
+ def check_create_complete(self, *args):
+ node_args = {k: self.properties.get(k) for k in self._NODE_KEYS}
+ node = self.client().Node(**node_args)
+
+ try:
+ resp, body = self.lb().add_nodes([node])
+ except Exception as exc:
+ if lb_immutable(exc):
+ return False
+ raise
+
+ new_node = body['nodes'][0]
+ node_id = new_node['id']
+
+ self.resource_id_set(node_id)
+ return True
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ return prop_diff
+
+ def check_update_complete(self, prop_diff):
+ node = self.node(self.lb())
+ is_complete = True
+
+ for key in self._NODE_KEYS:
+ if key in prop_diff and getattr(node, key, None) != prop_diff[key]:
+ setattr(node, key, prop_diff[key])
+ is_complete = False
+
+ if is_complete:
+ return True
+
+ try:
+ node.update()
+ except Exception as exc:
+ if lb_immutable(exc):
+ return False
+ raise
+
+ return False
+
+ def handle_delete(self):
+ return timeutils.utcnow()
+
+ def check_delete_complete(self, deleted_at):
+ if self.resource_id is None:
+ return True
+
+ try:
+ node = self.node(self.lb())
+ except (NotFound, LoadbalancerDeleted, NodeNotFound):
+ return True
+
+ if isinstance(deleted_at, six.string_types):
+ deleted_at = timeutils.parse_isotime(deleted_at)
+
+ deleted_at = timeutils.normalize_time(deleted_at)
+ waited = timeutils.utcnow() - deleted_at
+ timeout_secs = self.properties[self.DRAINING_TIMEOUT]
+ timeout_secs = datetime.timedelta(seconds=timeout_secs)
+
+ if waited > timeout_secs:
+ try:
+ node.delete()
+ except NotFound:
+ return True
+ except Exception as exc:
+ if lb_immutable(exc):
+ return False
+ raise
+ elif node.condition != self.DRAINING:
+ node.condition = self.DRAINING
+ try:
+ node.update()
+ except Exception as exc:
+ if lb_immutable(exc):
+ return False
+ raise
+
+ return False
+
+
+def resource_mapping():
+ return {'Rackspace::Cloud::LBNode': LBNode}
+
+
+def available_resource_mapping():
+ if PYRAX_INSTALLED:
+ return resource_mapping()
+ return {}
diff --git a/contrib/rackspace/rackspace/tests/test_auto_scale.py b/contrib/rackspace/rackspace/tests/test_auto_scale.py
index a2e30060b..8a709cb73 100644
--- a/contrib/rackspace/rackspace/tests/test_auto_scale.py
+++ b/contrib/rackspace/rackspace/tests/test_auto_scale.py
@@ -15,6 +15,7 @@ import copy
import itertools
import mock
+import six
from heat.common import exception
from heat.common import template_format
@@ -101,9 +102,15 @@ class FakeAutoScale(object):
def replace_launch_config(self, group_id, **kwargs):
"""Update the launch configuration on a scaling group."""
- allowed = ['launch_config_type', 'server_name', 'image', 'flavor',
- 'disk_config', 'metadata', 'personality', 'networks',
- 'load_balancers', 'key_name', 'user_data', 'config_drive']
+ if kwargs.get('launch_config_type') == 'launch_server':
+ allowed = ['launch_config_type', 'server_name', 'image', 'flavor',
+ 'disk_config', 'metadata', 'personality', 'networks',
+ 'load_balancers', 'key_name', 'user_data',
+ 'config_drive']
+ elif kwargs.get('launch_config_type') == 'launch_stack':
+ allowed = ['launch_config_type', 'template', 'template_url',
+ 'disable_rollback', 'environment', 'files',
+ 'parameters', 'timeout_mins']
self._check_args(kwargs, allowed)
self._get_group(group_id).kwargs = kwargs
@@ -170,7 +177,7 @@ class FakeAutoScale(object):
class ScalingGroupTest(common.HeatTestCase):
- group_template = template_format.parse('''
+ server_template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
@@ -200,6 +207,57 @@ class ScalingGroupTest(common.HeatTestCase):
networks:
- uuid: "00000000-0000-0000-0000-000000000000"
- uuid: "11111111-1111-1111-1111-111111111111"
+ loadBalancers:
+ - loadBalancerId: 234
+ port: 80
+
+ ''')
+
+ stack_template = template_format.parse('''
+ HeatTemplateFormatVersion: "2012-12-12"
+ Description: "Rackspace Auto Scale"
+ Parameters: {}
+ Resources:
+ my_group:
+ Type: Rackspace::AutoScale::Group
+ Properties:
+ groupConfiguration:
+ name: "My Group"
+ cooldown: 60
+ minEntities: 1
+ maxEntities: 25
+ metadata:
+ group: metadata
+ launchConfiguration:
+ type: launch_stack
+ args:
+ stack:
+ template: |
+ heat_template_version: 2015-10-15
+ description: This is a Heat template
+ parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+ resources:
+ rand:
+ type: OS::Heat::RandomString
+ disable_rollback: False
+ environment:
+ parameters:
+ image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
+ resource_registry:
+ Heat::InstallConfigAgent:
+ https://myhost.com/bootconfig.yaml
+ files:
+ fileA.yaml: Contents of the file
+ file:///usr/fileB.template: Contents of the file
+ parameters:
+ flavor: 4 GB Performance
+ timeout_mins: 30
''')
def setUp(self):
@@ -210,19 +268,23 @@ class ScalingGroupTest(common.HeatTestCase):
self.patchobject(auto_scale.Group, 'auto_scale',
return_value=self.fake_auto_scale)
# mock nova and glance client methods to satisfy constraints
- mock_im = self.patchobject(glance.GlanceClientPlugin, 'get_image_id')
+ mock_im = self.patchobject(glance.GlanceClientPlugin,
+ 'find_image_by_name_or_id')
mock_im.return_value = 'image-ref'
- mock_fl = self.patchobject(nova.NovaClientPlugin, 'get_flavor_id')
+ mock_fl = self.patchobject(nova.NovaClientPlugin,
+ 'find_flavor_by_name_or_id')
mock_fl.return_value = 'flavor-ref'
- def _setup_test_stack(self):
- self.stack = utils.parse_stack(self.group_template)
+ def _setup_test_stack(self, template=None):
+ if template is None:
+ template = self.server_template
+ self.stack = utils.parse_stack(template)
self.stack.create()
self.assertEqual(
('CREATE', 'COMPLETE'), self.stack.state,
self.stack.status_reason)
- def test_group_create(self):
+ def test_group_create_server(self):
"""Creating a group passes all the correct arguments to pyrax.
Also saves the group ID as the resource ID.
@@ -237,9 +299,12 @@ class ScalingGroupTest(common.HeatTestCase):
'disk_config': None,
'flavor': 'flavor-ref',
'image': 'image-ref',
- 'launch_config_type': 'launch_server',
- 'load_balancers': None,
+ 'load_balancers': [{
+ 'loadBalancerId': 234,
+ 'port': 80,
+ }],
'key_name': "my-key",
+ 'launch_config_type': u'launch_server',
'max_entities': 25,
'group_metadata': {'group': 'metadata'},
'metadata': {'server': 'metadata'},
@@ -256,6 +321,62 @@ class ScalingGroupTest(common.HeatTestCase):
resource = self.stack['my_group']
self.assertEqual('0', resource.FnGetRefId())
+ def test_group_create_stack(self):
+ """Creating a group passes all the correct arguments to pyrax.
+
+ Also saves the group ID as the resource ID.
+ """
+ self._setup_test_stack(self.stack_template)
+ self.assertEqual(1, len(self.fake_auto_scale.groups))
+ self.assertEqual(
+ {
+ 'cooldown': 60,
+ 'min_entities': 1,
+ 'max_entities': 25,
+ 'group_metadata': {'group': 'metadata'},
+ 'name': 'My Group',
+ 'launch_config_type': u'launch_stack',
+ 'template': (
+ '''heat_template_version: 2015-10-15
+description: This is a Heat template
+parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+resources:
+ rand:
+ type: OS::Heat::RandomString
+'''),
+ 'template_url': None,
+ 'disable_rollback': False,
+ 'environment': {
+ 'parameters': {
+ 'image':
+ 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
+ },
+ 'resource_registry': {
+ 'Heat::InstallConfigAgent': ('https://myhost.com/'
+ 'bootconfig.yaml')
+ }
+ },
+ 'files': {
+ 'fileA.yaml': 'Contents of the file',
+ 'file:///usr/fileB.template': 'Contents of the file'
+ },
+ 'parameters': {
+ 'flavor': '4 GB Performance',
+ },
+ 'timeout_mins': 30,
+ },
+ self.fake_auto_scale.groups['0'].kwargs
+ )
+
+ resource = self.stack['my_group']
+ self.assertEqual('0', resource.FnGetRefId())
+
def test_group_create_no_personality(self):
template = template_format.parse('''
@@ -304,7 +425,7 @@ Resources:
'flavor': 'flavor-ref',
'image': 'image-ref',
'launch_config_type': 'launch_server',
- 'load_balancers': None,
+ 'load_balancers': [],
'key_name': "my-key",
'max_entities': 25,
'group_metadata': {'group': 'metadata'},
@@ -356,7 +477,7 @@ Resources:
self.assertEqual(
5, self.fake_auto_scale.groups['0'].kwargs['min_entities'])
- def test_update_launch_config(self):
+ def test_update_launch_config_server(self):
"""Updates the launchConfiguration section.
Updating the launchConfiguration section in a template results in a
[{'loadBalancerId': 1, 'port': 80}],
self.fake_auto_scale.groups['0'].kwargs['load_balancers'])
+ def test_update_launch_config_stack(self):
+ self._setup_test_stack(self.stack_template)
+
+ resource = self.stack['my_group']
+ uprops = copy.deepcopy(dict(resource.properties.data))
+ lcargs = uprops['launchConfiguration']['args']
+ lcargs['stack']['timeout_mins'] = 60
+ new_template = rsrc_defn.ResourceDefinition(resource.name,
+ resource.type(),
+ uprops)
+
+ scheduler.TaskRunner(resource.update, new_template)()
+
+ self.assertEqual(1, len(self.fake_auto_scale.groups))
+ self.assertEqual(
+ 60,
+ self.fake_auto_scale.groups['0'].kwargs['timeout_mins'])
+
def test_delete(self):
"""Deleting a ScalingGroup resource invokes pyrax API to delete it."""
self._setup_test_stack()
@@ -668,3 +807,413 @@ class WebHookTest(common.HeatTestCase):
del self.fake_auto_scale.webhooks['0']
scheduler.TaskRunner(resource.delete)()
self.assertEqual({}, self.fake_auto_scale.webhooks)
+
+
+@mock.patch.object(resource.Resource, "client_plugin")
+@mock.patch.object(resource.Resource, "client")
+class AutoScaleGroupValidationTests(common.HeatTestCase):
+ def setUp(self):
+ super(AutoScaleGroupValidationTests, self).setUp()
+ self.mockstack = mock.Mock()
+ self.mockstack.has_cache_data.return_value = False
+ self.mockstack.db_resource_get.return_value = None
+
+ def test_validate_no_rcv3_pool(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "loadBalancers": [{
+ "loadBalancerId": 'not integer!',
+ }],
+ "server": {
+ "name": "sdfsdf",
+ "flavorRef": "ffdgdf",
+ "imageRef": "image-ref",
+ },
+ },
+ },
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ mock_client().list_load_balancer_pools.return_value = []
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertEqual(
+ 'Could not find RackConnectV3 pool with id not integer!: ',
+ six.text_type(error))
+
+ def test_validate_rcv3_pool_found(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "loadBalancers": [{
+ "loadBalancerId": 'pool_exists',
+ }],
+ "server": {
+ "name": "sdfsdf",
+ "flavorRef": "ffdgdf",
+ "imageRef": "image-ref",
+ },
+ },
+ },
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ mock_client().list_load_balancer_pools.return_value = [
+ mock.Mock(id='pool_exists'),
+ ]
+ self.assertIsNone(asg.validate())
+
+ def test_validate_no_lb_specified(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "server": {
+ "name": "sdfsdf",
+ "flavorRef": "ffdgdf",
+ "imageRef": "image-ref",
+ },
+ },
+ },
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ self.assertIsNone(asg.validate())
+
+ def test_validate_launch_stack(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_stack",
+ "args": {
+ "stack": {
+ 'template': (
+ '''heat_template_version: 2015-10-15
+description: This is a Heat template
+parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+resources:
+ rand:
+ type: OS::Heat::RandomString
+'''),
+ 'template_url': None,
+ 'disable_rollback': False,
+ 'environment': {
+ 'parameters': {
+ 'image':
+ 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
+ },
+ 'resource_registry': {
+ 'Heat::InstallConfigAgent': (
+ 'https://myhost.com/bootconfig.yaml')
+ }
+ },
+ 'files': {
+ 'fileA.yaml': 'Contents of the file',
+ 'file:///usr/fileB.yaml': 'Contents of the file'
+ },
+ 'parameters': {
+ 'flavor': '4 GB Performance',
+ },
+ 'timeout_mins': 30,
+ }
+ }
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ self.assertIsNone(asg.validate())
+
+ def test_validate_launch_server_and_stack(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "server": {
+ "name": "sdfsdf",
+ "flavorRef": "ffdgdf",
+ "imageRef": "image-ref",
+ },
+ "stack": {
+ 'template': (
+ '''heat_template_version: 2015-10-15
+description: This is a Heat template
+parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+resources:
+ rand:
+ type: OS::Heat::RandomString
+'''),
+ 'template_url': None,
+ 'disable_rollback': False,
+ 'environment': {
+ 'parameters': {
+ 'image':
+ 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
+ },
+ 'resource_registry': {
+ 'Heat::InstallConfigAgent': (
+ 'https://myhost.com/bootconfig.yaml')
+ }
+ },
+ 'files': {
+ 'fileA.yaml': 'Contents of the file',
+ 'file:///usr/fileB.yaml': 'Contents of the file'
+ },
+ 'parameters': {
+ 'flavor': '4 GB Performance',
+ },
+ 'timeout_mins': 30,
+ }
+ }
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertIn(
+ 'Must provide one of server or stack in launchConfiguration',
+ six.text_type(error))
+
+ def test_validate_no_launch_server_or_stack(self, mock_client,
+ mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {}
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertIn(
+ 'Must provide one of server or stack in launchConfiguration',
+ six.text_type(error))
+
+ def test_validate_stack_template_and_template_url(self, mock_client,
+ mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "stack": {
+ 'template': (
+ '''heat_template_version: 2015-10-15
+description: This is a Heat template
+parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+resources:
+ rand:
+ type: OS::Heat::RandomString
+'''),
+ 'template_url': 'https://myhost.com/template.yaml',
+ }
+ }
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertIn(
+ 'Must provide one of template or template_url',
+ six.text_type(error))
+
+ def test_validate_stack_no_template_or_template_url(self, mock_client,
+ mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_server",
+ "args": {
+ "stack": {
+ 'disable_rollback': False,
+ 'environment': {
+ 'parameters': {
+ 'image':
+ 'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
+ },
+ 'resource_registry': {
+ 'Heat::InstallConfigAgent': (
+ 'https://myhost.com/bootconfig.yaml')
+ }
+ },
+ 'files': {
+ 'fileA.yaml': 'Contents of the file',
+ 'file:///usr/fileB.yaml': 'Contents of the file'
+ },
+ 'parameters': {
+ 'flavor': '4 GB Performance',
+ },
+ 'timeout_mins': 30,
+ }
+ }
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertIn(
+ 'Must provide one of template or template_url',
+ six.text_type(error))
+
+ def test_validate_invalid_template(self, mock_client, mock_plugin):
+ asg_properties = {
+ "groupConfiguration": {
+ "name": "My Group",
+ "cooldown": 60,
+ "minEntities": 1,
+ "maxEntities": 25,
+ "metadata": {
+ "group": "metadata",
+ },
+ },
+ "launchConfiguration": {
+ "type": "launch_stack",
+ "args": {
+ "stack": {
+ 'template': (
+ '''SJDADKJAJKLSheat_template_version: 2015-10-15
+description: This is a Heat template
+parameters:
+ image:
+ default: cirros-0.3.4-x86_64-uec
+ type: string
+ flavor:
+ default: m1.tiny
+ type: string
+resources:
+ rand:
+ type: OS::Heat::RandomString
+'''),
+ 'template_url': None,
+ 'disable_rollback': False,
+ 'environment': {'Foo': 'Bar'},
+ 'files': {
+ 'fileA.yaml': 'Contents of the file',
+ 'file:///usr/fileB.yaml': 'Contents of the file'
+ },
+ 'parameters': {
+ 'flavor': '4 GB Performance',
+ },
+ 'timeout_mins': 30,
+ }
+ }
+ }
+ }
+ rsrcdef = rsrc_defn.ResourceDefinition(
+ "test", auto_scale.Group, properties=asg_properties)
+ asg = auto_scale.Group("test", rsrcdef, self.mockstack)
+
+ error = self.assertRaises(
+ exception.StackValidationFailed, asg.validate)
+ self.assertIn(
+ 'Encountered error while loading template:',
+ six.text_type(error))
diff --git a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py b/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
index 3e4e92876..f8cb8a605 100644
--- a/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
+++ b/contrib/rackspace/rackspace/tests/test_cloud_loadbalancer.py
@@ -343,11 +343,11 @@ class LoadBalancerTest(common.HeatTestCase):
self.lb_name = 'test-clb'
self.expected_body = {
"nodes": [FakeNode(address=u"166.78.103.141", port=80,
- condition=u"ENABLED")],
+ condition=u"ENABLED", type=u"PRIMARY",
+ weight=1)],
"protocol": u'HTTP',
"port": 80,
"virtual_ips": [FakeVirtualIP(type=u"PUBLIC", ipVersion=u"IPV6")],
- "halfClosed": None,
"algorithm": u'LEAST_CONNECTIONS',
"connectionThrottle": {'maxConnectionRate': 1000,
'maxConnections': None,
@@ -680,6 +680,10 @@ class LoadBalancerTest(common.HeatTestCase):
'type': 'ALLOW'},
{'address': '172.165.3.43',
'type': 'DENY'}]
+ api_access_list = [{"address": '192.168.1.1/0', 'id': 1234,
+ 'type': 'ALLOW'},
+ {'address': '172.165.3.43', 'id': 3422,
+ 'type': 'DENY'}]
template = self._set_template(self.lb_template,
accessList=access_list)
@@ -688,7 +692,7 @@ class LoadBalancerTest(common.HeatTestCase):
self.expected_body)
self.m.StubOutWithMock(fake_lb, 'get_access_list')
fake_lb.get_access_list().AndReturn([])
- fake_lb.get_access_list().AndReturn(access_list)
+ fake_lb.get_access_list().AndReturn(api_access_list)
self.m.StubOutWithMock(fake_lb, 'add_access_list')
fake_lb.add_access_list(access_list)
@@ -813,18 +817,24 @@ class LoadBalancerTest(common.HeatTestCase):
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
- "condition": "ENABLED"},
+ "condition": "ENABLED",
+ "type": "PRIMARY",
+ "weight": 1},
{"addresses": [expected_ip],
"port": 80,
- "condition": "ENABLED"}]
+ "condition": "ENABLED",
+ "type": "PRIMARY",
+ "weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb)
fake_lb1 = copy.deepcopy(fake_lb)
fake_lb1.nodes = [
- FakeNode(address=u"172.168.1.4", port=80, condition=u"ENABLED"),
- FakeNode(address=u"166.78.103.141", port=80, condition=u"ENABLED"),
+ FakeNode(address=u"172.168.1.4", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"166.78.103.141", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
]
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb1)
@@ -832,7 +842,8 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb.add_nodes([
fake_lb.Node(address=expected_ip,
port=80,
- condition='ENABLED')])
+ condition='ENABLED',
+ type="PRIMARY", weight=1)])
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.update, update_template)()
@@ -871,9 +882,12 @@ class LoadBalancerTest(common.HeatTestCase):
self.lb_name,
self.expected_body)
current_nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED"),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED"),
- FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED"),
+ FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1)
]
fake_lb.nodes = current_nodes
fake_lb.tracker = "fake_lb"
@@ -883,9 +897,12 @@ class LoadBalancerTest(common.HeatTestCase):
update_template = copy.deepcopy(rsrc.t)
expected_ip = '4.4.4.4'
update_template['Properties']['nodes'] = [
- {"addresses": ["1.1.1.1"], "port": 80, "condition": "ENABLED"},
- {"addresses": ["2.2.2.2"], "port": 80, "condition": "DISABLED"},
- {"addresses": [expected_ip], "port": 80, "condition": "ENABLED"},
+ {"addresses": ["1.1.1.1"], "port": 80, "condition": "ENABLED",
+ "type": "PRIMARY", "weight": 1},
+ {"addresses": ["2.2.2.2"], "port": 80, "condition": "DISABLED",
+ "type": "PRIMARY", "weight": 1},
+ {"addresses": [expected_ip], "port": 80, "condition": "ENABLED",
+ "type": "PRIMARY", "weight": 1}
]
self.m.UnsetStubs()
@@ -902,10 +919,14 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb2 = copy.deepcopy(fake_lb1)
fake_lb2.status = "ACTIVE"
fake_lb2.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED"),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED"),
- FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED"),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED"),
+ FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"3.3.3.3", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
]
fake_lb2.tracker = "fake_lb2"
@@ -917,9 +938,12 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb3 = copy.deepcopy(fake_lb2)
fake_lb3.status = "ACTIVE"
fake_lb3.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED"),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED"),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED"),
+ FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"2.2.2.2", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1)
]
fake_lb3.tracker = "fake_lb3"
@@ -931,9 +955,12 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb4 = copy.deepcopy(fake_lb3)
fake_lb4.status = "ACTIVE"
fake_lb4.nodes = [
- FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED"),
- FakeNode(address=u"2.2.2.2", port=80, condition=u"DISABLED"),
- FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED"),
+ FakeNode(address=u"1.1.1.1", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"2.2.2.2", port=80, condition=u"DISABLED",
+ type="PRIMARY", weight=1),
+ FakeNode(address=u"4.4.4.4", port=80, condition=u"ENABLED",
+ type="PRIMARY", weight=1)
]
fake_lb4.tracker = "fake_lb4"
@@ -1448,8 +1475,8 @@ class LoadBalancerTest(common.HeatTestCase):
'intermediateCertificate': '', 'secureTrafficOnly': False}
ssl_termination_api = copy.deepcopy(ssl_termination_template)
lb_name = list(six.iterkeys(template['Resources']))[0]
- template['Resources'][lb_name]['Properties']['sslTermination'] = \
- ssl_termination_template
+ template['Resources'][lb_name]['Properties']['sslTermination'] = (
+ ssl_termination_template)
# The SSL termination config is done post-creation, so no need
# to modify self.expected_body
rsrc, fake_lb = self._mock_loadbalancer(template,
@@ -2029,10 +2056,14 @@ class LoadBalancerTest(common.HeatTestCase):
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
- "condition": "DRAINING"},
+ "condition": "DRAINING",
+ "type": "PRIMARY",
+ "weight": 1},
{"addresses": [expected_ip],
"port": 80,
- "condition": "DRAINING"}]
+ "condition": "DRAINING",
+ "type": "PRIMARY",
+ "weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
@@ -2043,14 +2074,15 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb1.add_nodes([
fake_lb1.Node(address=expected_ip,
port=80,
- condition='DRAINING')])
+ condition='DRAINING',
+ type="PRIMARY", weight=1)])
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.nodes = [
FakeNode(address=u"166.78.103.141", port=80,
- condition=u"DRAINING"),
+ condition=u"DRAINING", type="PRIMARY", weight=1),
FakeNode(address=u"172.168.1.4", port=80,
- condition=u"DRAINING"),
+ condition=u"DRAINING", type="PRIMARY", weight=1),
]
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
@@ -2072,10 +2104,14 @@ class LoadBalancerTest(common.HeatTestCase):
update_template['Properties']['nodes'] = [
{"addresses": ["166.78.103.141"],
"port": 80,
- "condition": "ENABLED"},
+ "condition": "ENABLED",
+ "type": "PRIMARY",
+ "weight": 1},
{"addresses": ["166.78.103.141"],
"port": 81,
- "condition": "ENABLED"}]
+ "condition": "ENABLED",
+ "type": "PRIMARY",
+ "weight": 1}]
self.m.UnsetStubs()
self.m.StubOutWithMock(rsrc.clb, 'get')
@@ -2086,15 +2122,16 @@ class LoadBalancerTest(common.HeatTestCase):
fake_lb1.add_nodes([
fake_lb1.Node(address="166.78.103.141",
port=81,
- condition='ENABLED')])
+ condition='ENABLED',
+ type="PRIMARY", weight=1)])
fake_lb1.tracker = "fake_lb1"
fake_lb2 = copy.deepcopy(fake_lb)
fake_lb2.nodes = [
FakeNode(address=u"166.78.103.141", port=80,
- condition=u"ENABLED"),
+ condition=u"ENABLED", type="PRIMARY", weight=1),
FakeNode(address=u"166.78.103.141", port=81,
- condition=u"ENABLED"),
+ condition=u"ENABLED", type="PRIMARY", weight=1),
]
fake_lb2.tracker = "fake_lb2"
rsrc.clb.get(mox.IgnoreArg()).AndReturn(fake_lb2)
@@ -2103,3 +2140,39 @@ class LoadBalancerTest(common.HeatTestCase):
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
+
+ def test_update_nodes_defaults(self):
+ template = copy.deepcopy(self.lb_template)
+ lb_name = list(six.iterkeys(template['Resources']))[0]
+ tmpl_node = template['Resources'][lb_name]['Properties']['nodes'][0]
+ tmpl_node['type'] = "PRIMARY"
+ tmpl_node['condition'] = "ENABLED"
+ tmpl_node['weight'] = 1
+ expected_body = copy.deepcopy(self.expected_body)
+ expected_body['nodes'] = [FakeNode(address=u"166.78.103.141", port=80,
+ condition=u"ENABLED",
+ type="PRIMARY", weight=1)]
+
+ rsrc, fake_lb = self._mock_loadbalancer(template,
+ self.lb_name,
+ expected_body)
+ fake_lb.nodes = self.expected_body['nodes']
+ self.m.ReplayAll()
+ scheduler.TaskRunner(rsrc.create)()
+
+ update_template = copy.deepcopy(rsrc.t)
+ update_template['Properties']['nodes'] = [
+ {"addresses": ["166.78.103.141"],
+ "port": 80}]
+
+ self.m.UnsetStubs()
+ self.m.StubOutWithMock(rsrc.clb, 'get')
+ fake_lb1 = copy.deepcopy(fake_lb)
+ rsrc.clb.get(mox.IgnoreArg()).MultipleTimes().AndReturn(fake_lb1)
+
+ self.m.StubOutWithMock(fake_lb1, 'add_nodes')
+
+ self.m.ReplayAll()
+ scheduler.TaskRunner(rsrc.update, update_template)()
+ self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
+ self.m.VerifyAll()
diff --git a/contrib/rackspace/rackspace/tests/test_lb_node.py b/contrib/rackspace/rackspace/tests/test_lb_node.py
new file mode 100644
index 000000000..e42135576
--- /dev/null
+++ b/contrib/rackspace/rackspace/tests/test_lb_node.py
@@ -0,0 +1,305 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import mock
+
+from heat.engine import rsrc_defn
+from heat.tests import common
+
+from ..resources import lb_node # noqa
+from ..resources.lb_node import ( # noqa
+ LoadbalancerDeleted,
+ NotFound,
+ NodeNotFound)
+
+from .test_cloud_loadbalancer import FakeNode # noqa
+
+
+class LBNode(lb_node.LBNode):
+ @classmethod
+ def is_service_available(cls, context):
+ return True
+
+
+class LBNodeTest(common.HeatTestCase):
+ def setUp(self):
+ super(LBNodeTest, self).setUp()
+ self.mockstack = mock.Mock()
+ self.mockstack.has_cache_data.return_value = False
+ self.mockstack.db_resource_get.return_value = None
+ self.mockclient = mock.Mock()
+ self.mockstack.clients.client.return_value = self.mockclient
+
+ self.def_props = {
+ LBNode.LOAD_BALANCER: 'some_lb_id',
+ LBNode.DRAINING_TIMEOUT: 60,
+ LBNode.ADDRESS: 'some_ip',
+ LBNode.PORT: 80,
+ LBNode.CONDITION: 'ENABLED',
+ LBNode.TYPE: 'PRIMARY',
+ LBNode.WEIGHT: None,
+ }
+ self.resource_def = rsrc_defn.ResourceDefinition(
+ "test", LBNode, properties=self.def_props)
+
+ self.resource = LBNode("test", self.resource_def, self.mockstack)
+ self.resource.resource_id = 12345
+
+ def test_create(self):
+ self.resource.resource_id = None
+
+ fake_lb = mock.Mock()
+ fake_lb.add_nodes.return_value = (None, {'nodes': [{'id': 12345}]})
+ self.mockclient.get.return_value = fake_lb
+
+ fake_node = mock.Mock()
+ self.mockclient.Node.return_value = fake_node
+
+ self.resource.check_create_complete()
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ self.mockclient.Node.assert_called_once_with(
+ address='some_ip', port=80, condition='ENABLED',
+ type='PRIMARY', weight=0)
+ fake_lb.add_nodes.assert_called_once_with([fake_node])
+ self.assertEqual(self.resource.resource_id, 12345)
+
+ def test_create_lb_not_found(self):
+ self.mockclient.get.side_effect = NotFound()
+ self.assertRaises(NotFound, self.resource.check_create_complete)
+
+ def test_create_lb_deleted(self):
+ fake_lb = mock.Mock()
+ fake_lb.id = 1111
+ fake_lb.status = 'DELETED'
+ self.mockclient.get.return_value = fake_lb
+
+ exc = self.assertRaises(LoadbalancerDeleted,
+ self.resource.check_create_complete)
+ self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
+ str(exc))
+
+ def test_create_lb_pending_delete(self):
+ fake_lb = mock.Mock()
+ fake_lb.id = 1111
+ fake_lb.status = 'PENDING_DELETE'
+ self.mockclient.get.return_value = fake_lb
+
+ exc = self.assertRaises(LoadbalancerDeleted,
+ self.resource.check_create_complete)
+ self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
+ str(exc))
+
+ def test_handle_update_method(self):
+ self.assertEqual(self.resource.handle_update(None, None, 'foo'), 'foo')
+
+ def _test_update(self, diff):
+ fake_lb = mock.Mock()
+ fake_node = FakeNode(id=12345, address='a', port='b')
+ fake_node.update = mock.Mock()
+ expected_node = FakeNode(id=12345, address='a', port='b', **diff)
+ expected_node.update = fake_node.update
+ fake_lb.nodes = [fake_node]
+ self.mockclient.get.return_value = fake_lb
+
+ self.assertFalse(self.resource.check_update_complete(prop_diff=diff))
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ fake_node.update.assert_called_once_with()
+ self.assertEqual(fake_node, expected_node)
+
+ def test_update_condition(self):
+ self._test_update({'condition': 'DISABLED'})
+
+ def test_update_weight(self):
+ self._test_update({'weight': 100})
+
+ def test_update_type(self):
+ self._test_update({'type': 'SECONDARY'})
+
+ def test_update_multiple(self):
+ self._test_update({'condition': 'DISABLED',
+ 'weight': 100,
+ 'type': 'SECONDARY'})
+
+ def test_update_finished(self):
+ fake_lb = mock.Mock()
+ fake_node = FakeNode(id=12345, address='a', port='b',
+ condition='ENABLED')
+ fake_node.update = mock.Mock()
+ expected_node = FakeNode(id=12345, address='a', port='b',
+ condition='ENABLED')
+ expected_node.update = fake_node.update
+ fake_lb.nodes = [fake_node]
+ self.mockclient.get.return_value = fake_lb
+
+ diff = {'condition': 'ENABLED'}
+ self.assertTrue(self.resource.check_update_complete(prop_diff=diff))
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ self.assertFalse(fake_node.update.called)
+ self.assertEqual(fake_node, expected_node)
+
+ def test_update_lb_not_found(self):
+ self.mockclient.get.side_effect = NotFound()
+
+ diff = {'condition': 'ENABLED'}
+ self.assertRaises(NotFound, self.resource.check_update_complete,
+ prop_diff=diff)
+
+ def test_update_lb_deleted(self):
+ fake_lb = mock.Mock()
+ fake_lb.id = 1111
+ fake_lb.status = 'DELETED'
+ self.mockclient.get.return_value = fake_lb
+
+ diff = {'condition': 'ENABLED'}
+ exc = self.assertRaises(LoadbalancerDeleted,
+ self.resource.check_update_complete,
+ prop_diff=diff)
+ self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
+ str(exc))
+
+ def test_update_lb_pending_delete(self):
+ fake_lb = mock.Mock()
+ fake_lb.id = 1111
+ fake_lb.status = 'PENDING_DELETE'
+ self.mockclient.get.return_value = fake_lb
+
+ diff = {'condition': 'ENABLED'}
+ exc = self.assertRaises(LoadbalancerDeleted,
+ self.resource.check_update_complete,
+ prop_diff=diff)
+ self.assertEqual("The Load Balancer (ID 1111) has been deleted.",
+ str(exc))
+
+ def test_update_node_not_found(self):
+ fake_lb = mock.Mock()
+ fake_lb.id = 4444
+ fake_lb.nodes = []
+ self.mockclient.get.return_value = fake_lb
+
+ diff = {'condition': 'ENABLED'}
+ exc = self.assertRaises(NodeNotFound,
+ self.resource.check_update_complete,
+ prop_diff=diff)
+ self.assertEqual(
+ "Node (ID 12345) not found on Load Balancer (ID 4444).", str(exc))
+
+ def test_delete_no_id(self):
+ self.resource.resource_id = None
+ self.assertTrue(self.resource.check_delete_complete(None))
+
+ def test_delete_lb_already_deleted(self):
+ self.mockclient.get.side_effect = NotFound()
+ self.assertTrue(self.resource.check_delete_complete(None))
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+
+ def test_delete_lb_deleted_status(self):
+ fake_lb = mock.Mock()
+ fake_lb.status = 'DELETED'
+ self.mockclient.get.return_value = fake_lb
+
+ self.assertTrue(self.resource.check_delete_complete(None))
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+
+ def test_delete_lb_pending_delete_status(self):
+ fake_lb = mock.Mock()
+ fake_lb.status = 'PENDING_DELETE'
+ self.mockclient.get.return_value = fake_lb
+
+ self.assertTrue(self.resource.check_delete_complete(None))
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+
+ def test_delete_node_already_deleted(self):
+ fake_lb = mock.Mock()
+ fake_lb.nodes = []
+ self.mockclient.get.return_value = fake_lb
+
+ self.assertTrue(self.resource.check_delete_complete(None))
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+
+ @mock.patch.object(lb_node.timeutils, 'utcnow')
+ def test_drain_before_delete(self, mock_utcnow):
+ fake_lb = mock.Mock()
+ fake_node = FakeNode(id=12345, address='a', port='b')
+ expected_node = FakeNode(id=12345, address='a', port='b',
+ condition='DRAINING')
+ fake_node.update = mock.Mock()
+ expected_node.update = fake_node.update
+ fake_node.delete = mock.Mock()
+ expected_node.delete = fake_node.delete
+ fake_lb.nodes = [fake_node]
+ self.mockclient.get.return_value = fake_lb
+
+ now = datetime.datetime.utcnow()
+ mock_utcnow.return_value = now
+
+ self.assertFalse(self.resource.check_delete_complete(now))
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ fake_node.update.assert_called_once_with()
+ self.assertFalse(fake_node.delete.called)
+ self.assertEqual(fake_node, expected_node)
+
+ @mock.patch.object(lb_node.timeutils, 'utcnow')
+ def test_delete_waiting(self, mock_utcnow):
+ fake_lb = mock.Mock()
+ fake_node = FakeNode(id=12345, address='a', port='b',
+ condition='DRAINING')
+ expected_node = FakeNode(id=12345, address='a', port='b',
+ condition='DRAINING')
+ fake_node.update = mock.Mock()
+ expected_node.update = fake_node.update
+ fake_node.delete = mock.Mock()
+ expected_node.delete = fake_node.delete
+ fake_lb.nodes = [fake_node]
+ self.mockclient.get.return_value = fake_lb
+
+ now = datetime.datetime.utcnow()
+ now_plus_30 = now + datetime.timedelta(seconds=30)
+ mock_utcnow.return_value = now_plus_30
+
+ self.assertFalse(self.resource.check_delete_complete(now))
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ self.assertFalse(fake_node.update.called)
+ self.assertFalse(fake_node.delete.called)
+ self.assertEqual(fake_node, expected_node)
+
+ @mock.patch.object(lb_node.timeutils, 'utcnow')
+ def test_delete_finishing(self, mock_utcnow):
+ fake_lb = mock.Mock()
+ fake_node = FakeNode(id=12345, address='a', port='b',
+ condition='DRAINING')
+ expected_node = FakeNode(id=12345, address='a', port='b',
+ condition='DRAINING')
+ fake_node.update = mock.Mock()
+ expected_node.update = fake_node.update
+ fake_node.delete = mock.Mock()
+ expected_node.delete = fake_node.delete
+ fake_lb.nodes = [fake_node]
+ self.mockclient.get.return_value = fake_lb
+
+ now = datetime.datetime.utcnow()
+ now_plus_62 = now + datetime.timedelta(seconds=62)
+ mock_utcnow.return_value = now_plus_62
+
+ self.assertFalse(self.resource.check_delete_complete(now))
+
+ self.mockclient.get.assert_called_once_with('some_lb_id')
+ self.assertFalse(fake_node.update.called)
+ self.assertTrue(fake_node.delete.called)
+ self.assertEqual(fake_node, expected_node)
diff --git a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py b/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
index 258555945..6f534db9d 100644
--- a/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
+++ b/contrib/rackspace/rackspace/tests/test_rackspace_cloud_server.py
@@ -76,15 +76,15 @@ class CloudServersTest(common.HeatTestCase):
resource._register_class("OS::Nova::Server",
cloud_server.CloudServer)
- def _mock_get_image_id_success(self, imageId):
+ def _mock_find_image_by_name_or_id_success(self, imageId):
self.mock_get_image = mock.Mock()
self.ctx.clients.client_plugin(
- 'glance').get_image_id = self.mock_get_image
+ 'glance').find_image_by_name_or_id = self.mock_get_image
self.mock_get_image.return_value = imageId
def _stub_server_validate(self, server, imageId_input, image_id):
# stub glance image validate
- self._mock_get_image_id_success(image_id)
+ self._mock_find_image_by_name_or_id_success(image_id)
def _setup_test_stack(self, stack_name):
t = template_format.parse(wp_template)
@@ -158,7 +158,10 @@ class CloudServersTest(common.HeatTestCase):
def test_rackconnect_deployed(self):
return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rackconnect_automation_status': 'DEPLOYED'}
+ return_server.metadata = {
+ 'rackconnect_automation_status': 'DEPLOYED',
+ 'rax_service_level_automation': 'Complete',
+ }
server = self._setup_test_server(return_server,
'test_rackconnect_deployed')
server.context.roles = ['rack_connect']
@@ -173,7 +176,10 @@ class CloudServersTest(common.HeatTestCase):
def test_rackconnect_failed(self):
return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rackconnect_automation_status': 'FAILED'}
+ return_server.metadata = {
+ 'rackconnect_automation_status': 'FAILED',
+ 'rax_service_level_automation': 'Complete',
+ }
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
@@ -189,10 +195,11 @@ class CloudServersTest(common.HeatTestCase):
def test_rackconnect_unprocessable(self):
return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rackconnect_automation_status':
- 'UNPROCESSABLE',
- 'rackconnect_unprocessable_reason':
- 'Fake reason'}
+ return_server.metadata = {
+ 'rackconnect_automation_status': 'UNPROCESSABLE',
+ 'rackconnect_unprocessable_reason': 'Fake reason',
+ 'rax_service_level_automation': 'Complete',
+ }
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
@@ -207,7 +214,10 @@ class CloudServersTest(common.HeatTestCase):
def test_rackconnect_unknown(self):
return_server = self.fc.servers.list()[1]
- return_server.metadata = {'rackconnect_automation_status': 'FOO'}
+ return_server.metadata = {
+ 'rackconnect_automation_status': 'FOO',
+ 'rax_service_level_automation': 'Complete',
+ }
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
@@ -227,20 +237,22 @@ class CloudServersTest(common.HeatTestCase):
'srv_sts_bld')
server.resource_id = 1234
server.context.roles = ['rack_connect']
-
check_iterations = [0]
# Bind fake get method which check_create_complete will call
def activate_status(server):
check_iterations[0] += 1
if check_iterations[0] == 1:
- return_server.metadata[
- 'rackconnect_automation_status'] = 'DEPLOYING'
+ return_server.metadata.update({
+ 'rackconnect_automation_status': 'DEPLOYING',
+ 'rax_service_level_automation': 'Complete',
+ })
if check_iterations[0] == 2:
return_server.status = 'ACTIVE'
if check_iterations[0] > 3:
- return_server.metadata[
- 'rackconnect_automation_status'] = 'DEPLOYED'
+ return_server.metadata.update({
+ 'rackconnect_automation_status': 'DEPLOYED',
+ })
return return_server
self.patchobject(self.fc.servers, 'get',
side_effect=activate_status)
@@ -255,6 +267,7 @@ class CloudServersTest(common.HeatTestCase):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
+
server.resource_id = 1234
server.context.roles = ['rack_connect']
@@ -265,11 +278,11 @@ class CloudServersTest(common.HeatTestCase):
check_iterations[0] += 1
if check_iterations[0] == 1:
return_server.status = 'ACTIVE'
- if check_iterations[0] == 2:
- return_server.metadata = {}
if check_iterations[0] > 2:
- return_server.metadata[
- 'rackconnect_automation_status'] = 'DEPLOYED'
+ return_server.metadata.update({
+ 'rackconnect_automation_status': 'DEPLOYED',
+ 'rax_service_level_automation': 'Complete'})
+
return return_server
self.patchobject(self.fc.servers, 'get',
side_effect=activate_status)
@@ -280,13 +293,13 @@ class CloudServersTest(common.HeatTestCase):
self.m.VerifyAll()
- def test_managed_cloud_lifecycle(self):
+ def test_rax_automation_lifecycle(self):
return_server = self.fc.servers.list()[0]
server = self._setup_test_server(return_server,
'srv_sts_bld')
server.resource_id = 1234
- server.context.roles = ['rack_connect', 'rax_managed']
-
+ server.context.roles = ['rack_connect']
+ server.metadata = {}
check_iterations = [0]
# Bind fake get method which check_create_complete will call
@@ -317,6 +330,7 @@ class CloudServersTest(common.HeatTestCase):
def test_add_port_for_addresses(self):
return_server = self.fc.servers.list()[1]
+ return_server.metadata = {'rax_service_level_automation': 'Complete'}
stack_name = 'test_stack'
(tmpl, stack) = self._setup_test_stack(stack_name)
resource_defns = tmpl.resource_definitions(stack)
@@ -411,13 +425,12 @@ class CloudServersTest(common.HeatTestCase):
resp = server._add_port_for_address(return_server)
self.assertEqual(expected, resp)
- def test_managed_cloud_build_error(self):
+ def test_rax_automation_build_error(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation':
'Build Error'}
server = self._setup_test_server(return_server,
'test_managed_cloud_build_error')
- server.context.roles = ['rax_managed']
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
@@ -425,15 +438,14 @@ class CloudServersTest(common.HeatTestCase):
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_managed_cloud_build_error: '
- 'Managed Cloud automation failed',
+ 'Rackspace Cloud automation failed',
six.text_type(exc))
- def test_managed_cloud_unknown(self):
+ def test_rax_automation_unknown(self):
return_server = self.fc.servers.list()[1]
return_server.metadata = {'rax_service_level_automation': 'FOO'}
server = self._setup_test_server(return_server,
'test_managed_cloud_unknown')
- server.context.roles = ['rax_managed']
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get(return_server.id).MultipleTimes(
).AndReturn(return_server)
@@ -441,24 +453,34 @@ class CloudServersTest(common.HeatTestCase):
create = scheduler.TaskRunner(server.create)
exc = self.assertRaises(exception.ResourceFailure, create)
self.assertEqual('Error: resources.test_managed_cloud_unknown: '
- 'Unknown Managed Cloud automation status: FOO',
+ 'Unknown Rackspace Cloud automation status: FOO',
six.text_type(exc))
- def _test_server_config_drive(self, user_data, config_drive, result):
+ def _test_server_config_drive(self, user_data, config_drive, result,
+ ud_format='RAW'):
return_server = self.fc.servers.list()[1]
+ return_server.metadata = {'rax_service_level_automation': 'Complete'}
stack_name = 'no_user_data'
(tmpl, stack) = self._setup_test_stack(stack_name)
properties = tmpl.t['Resources']['WebServer']['Properties']
properties['user_data'] = user_data
properties['config_drive'] = config_drive
+ properties['user_data_format'] = ud_format
+ properties['software_config_transport'] = "POLL_TEMP_URL"
resource_defns = tmpl.resource_definitions(stack)
server = cloud_server.CloudServer('WebServer',
resource_defns['WebServer'], stack)
+ server.metadata = {'rax_service_level_automation': 'Complete'}
self.patchobject(server, 'store_external_ports')
+ self.patchobject(server, "_populate_deployments_metadata")
mock_servers_create = mock.Mock(return_value=return_server)
self.fc.servers.create = mock_servers_create
image_id = mock.ANY
- self._mock_get_image_id_success(image_id)
+ self._mock_find_image_by_name_or_id_success(image_id)
+ self.m.StubOutWithMock(self.fc.servers, 'get')
+ self.fc.servers.get(return_server.id).MultipleTimes(
+ ).AndReturn(return_server)
+ self.m.ReplayAll()
scheduler.TaskRunner(server.create)()
mock_servers_create.assert_called_with(
image=image_id,
@@ -490,3 +512,7 @@ class CloudServersTest(common.HeatTestCase):
def test_server_no_user_data_no_config_drive(self):
self._test_server_config_drive(None, False, False)
+
+ def test_server_no_user_data_software_config(self):
+ self._test_server_config_drive(None, False, True,
+ ud_format="SOFTWARE_CONFIG")
diff --git a/contrib/rackspace/requirements.txt b/contrib/rackspace/requirements.txt
index de418f324..5d197dd52 100644
--- a/contrib/rackspace/requirements.txt
+++ b/contrib/rackspace/requirements.txt
@@ -1 +1 @@
-pyrax>=1.9.2
+-e git+https://github.com/rackerlabs/heat-pyrax.git#egg=pyrax