summary refs log tree commit diff
diff options
context:
space:
mode:
author: Jenkins <jenkins@review.openstack.org> 2015-10-21 22:14:40 +0000
committer: Gerrit Code Review <review@openstack.org> 2015-10-21 22:14:40 +0000
commit6e54b343b03bfd1549a51ddc358fbe004f45edcf (patch)
tree33e6b230057753295d46c38fe74c932b4ca9c188
parent472a5017271f8f6121b60412248a7557928b4439 (diff)
parente182a57c9a8ba2f5f4203fac3d2aa04d45dffb5e (diff)
downloadheat-6e54b343b03bfd1549a51ddc358fbe004f45edcf.tar.gz
Merge "ASG scaling account for cooldown timestamp & in-progress" into stable/juno
-rw-r--r--heat/engine/resources/autoscaling.py5
-rw-r--r--heat/engine/resources/aws/scaling_policy.py36
-rw-r--r--heat/engine/resources/openstack/scaling_policy.py36
-rw-r--r--heat/scaling/cooldown.py30
-rw-r--r--heat/tests/test_autoscaling.py255
-rw-r--r--heat/tests/test_heat_autoscaling_group.py2
6 files changed, 241 insertions, 123 deletions
diff --git a/heat/engine/resources/autoscaling.py b/heat/engine/resources/autoscaling.py
index 0d0b83971..307cf6f58 100644
--- a/heat/engine/resources/autoscaling.py
+++ b/heat/engine/resources/autoscaling.py
@@ -627,6 +627,7 @@ class AutoScalingGroup(InstanceGroup, cooldown.CooldownMixin):
adjustment_type, lower, upper)
if new_capacity == capacity:
+ self._cooldown_timestamp(None)
LOG.debug('no change in capacity %d' % capacity)
return
@@ -661,8 +662,8 @@ class AutoScalingGroup(InstanceGroup, cooldown.CooldownMixin):
'group': notif['groupname']},
})
notification.send(**notif)
-
- self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
+ finally:
+ self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
def _tags(self):
"""Add Identifing Tags to all servers in the group.
diff --git a/heat/engine/resources/aws/scaling_policy.py b/heat/engine/resources/aws/scaling_policy.py
index 40986bb89..f1aa45d26 100644
--- a/heat/engine/resources/aws/scaling_policy.py
+++ b/heat/engine/resources/aws/scaling_policy.py
@@ -127,22 +127,26 @@ class AWSScalingPolicy(signal_responder.SignalResponder,
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
- if group is None:
- raise exception.NotFound(_('Alarm %(alarm)s could not find '
- 'scaling group named "%(group)s"') % {
- 'alarm': self.name,
- 'group': asgn_id})
-
- LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
- '%(asgn_id)s by %(filter)s')
- % {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
- 'filter': self.properties[self.SCALING_ADJUSTMENT]})
- adjustment_type = self._get_adjustement_type()
- group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type)
-
- self._cooldown_timestamp("%s : %s" %
- (self.properties[self.ADJUSTMENT_TYPE],
- self.properties[self.SCALING_ADJUSTMENT]))
+ try:
+ if group is None:
+ raise exception.NotFound(
+ _('Alarm %(alarm)s could not find '
+ 'scaling group named "%(group)s"') % {
+ 'alarm': self.name, 'group': asgn_id})
+
+ LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
+ '%(asgn_id)s by %(filter)s')
+ % {'name': self.name, 'group': group.name,
+ 'asgn_id': asgn_id,
+ 'filter': self.properties[self.SCALING_ADJUSTMENT]})
+ adjustment_type = self._get_adjustement_type()
+ group.adjust(self.properties[self.SCALING_ADJUSTMENT],
+ adjustment_type)
+ finally:
+ self._cooldown_timestamp(
+ "%s : %s" %
+ (self.properties[self.ADJUSTMENT_TYPE],
+ self.properties[self.SCALING_ADJUSTMENT]))
def _resolve_attribute(self, name):
'''
diff --git a/heat/engine/resources/openstack/scaling_policy.py b/heat/engine/resources/openstack/scaling_policy.py
index 0d1fed325..4a21c666a 100644
--- a/heat/engine/resources/openstack/scaling_policy.py
+++ b/heat/engine/resources/openstack/scaling_policy.py
@@ -136,22 +136,26 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
- if group is None:
- raise exception.NotFound(_('Alarm %(alarm)s could not find '
- 'scaling group named "%(group)s"') % {
- 'alarm': self.name,
- 'group': asgn_id})
-
- LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
- '%(asgn_id)s by %(filter)s')
- % {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
- 'filter': self.properties[self.SCALING_ADJUSTMENT]})
- adjustment_type = self._get_adjustement_type()
- group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type)
-
- self._cooldown_timestamp("%s : %s" %
- (self.properties[self.ADJUSTMENT_TYPE],
- self.properties[self.SCALING_ADJUSTMENT]))
+ try:
+ if group is None:
+ raise exception.NotFound(
+ _('Alarm %(alarm)s could not find '
+ 'scaling group named "%(group)s"') % {
+ 'alarm': self.name, 'group': asgn_id})
+
+ LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
+ '%(asgn_id)s by %(filter)s')
+ % {'name': self.name, 'group': group.name,
+ 'asgn_id': asgn_id,
+ 'filter': self.properties[self.SCALING_ADJUSTMENT]})
+ adjustment_type = self._get_adjustement_type()
+ group.adjust(self.properties[self.SCALING_ADJUSTMENT],
+ adjustment_type)
+ finally:
+ self._cooldown_timestamp(
+ "%s : %s" %
+ (self.properties[self.ADJUSTMENT_TYPE],
+ self.properties[self.SCALING_ADJUSTMENT]))
def _resolve_attribute(self, name):
if name == self.ALARM_URL and self.resource_id is not None:
diff --git a/heat/scaling/cooldown.py b/heat/scaling/cooldown.py
index 882b99de2..d621f3f2c 100644
--- a/heat/scaling/cooldown.py
+++ b/heat/scaling/cooldown.py
@@ -10,6 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import six
from oslo.utils import timeutils
@@ -17,7 +18,8 @@ from oslo.utils import timeutils
class CooldownMixin(object):
'''
Utility class to encapsulate Cooldown related logic which is shared
- between AutoScalingGroup and ScalingPolicy
+ between AutoScalingGroup and ScalingPolicy. This logic includes both
+ cooldown timestamp comparing and scaling in progress checking.
'''
def _cooldown_inprogress(self):
inprogress = False
@@ -29,16 +31,34 @@ class CooldownMixin(object):
cooldown = 0
metadata = self.metadata_get()
- if metadata and cooldown != 0:
- last_adjust = metadata.keys()[0]
+ if metadata.get('scaling_in_progress'):
+ return True
+
+ if 'cooldown' not in metadata:
+ # Note: this is for supporting old version cooldown checking
+ if metadata and cooldown != 0:
+ last_adjust = next(six.iterkeys(metadata))
+ if not timeutils.is_older_than(last_adjust, cooldown):
+ inprogress = True
+ elif cooldown != 0:
+ last_adjust = next(six.iterkeys(metadata['cooldown']))
if not timeutils.is_older_than(last_adjust, cooldown):
inprogress = True
+
+ if not inprogress:
+ metadata['scaling_in_progress'] = True
+ self.metadata_set(metadata)
+
return inprogress
def _cooldown_timestamp(self, reason):
- # Save resource metadata with a timestamp and reason
+ # Save cooldown timestamp into metadata and clean the
+ # scaling_in_progress state.
# If we wanted to implement the AutoScaling API like AWS does,
# we could maintain event history here, but since we only need
# the latest event for cooldown, just store that for now
- metadata = {timeutils.strtime(): reason}
+ metadata = self.metadata_get()
+ if reason is not None:
+ metadata['cooldown'] = {timeutils.utcnow().isoformat(): reason}
+ metadata['scaling_in_progress'] = False
self.metadata_set(metadata)
diff --git a/heat/tests/test_autoscaling.py b/heat/tests/test_autoscaling.py
index d57f688dc..5f3588af8 100644
--- a/heat/tests/test_autoscaling.py
+++ b/heat/tests/test_autoscaling.py
@@ -273,18 +273,36 @@ class AutoScalingTest(HeatTestCase):
suffix='end',
).AndReturn(False)
- def _stub_meta_expected(self, now, data, nmeta=1):
+ def _stub_meta_expected(self, now, data=None, nsignal=1):
# Stop time at now
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
- # Then set a stub to ensure the metadata update is as
- # expected based on the timestamp and data
+ # Then set a stub to ensure the metadata updates are as expected.
self.m.StubOutWithMock(resource.Resource, 'metadata_set')
- expected = {timeutils.strtime(now): data}
+
+ # Set scaling_in_progress=True at the beginning of a scaling operation.
+ # This does not happen on create.
+ init_expected = mox.ContainsKeyValue('scaling_in_progress', True)
+ # Note for ScalingPolicy, we expect to get a metadata
+ # update for the policy and autoscaling group, so pass nsignal=2 in
+ # that case
+ for x in range(nsignal):
+ resource.Resource.metadata_set(init_expected).AndReturn(None)
+
+ # Set scaling_in_progress=False and the cooldown timestamp at the end
+ # of a scaling operation. This occurs on both create and scale events.
+ if data is None:
+ cooldown = mox.IgnoreArg()
+ else:
+ cooldown = {now.isoformat(): data}
+ expected = {'cooldown': cooldown,
+ 'scaling_in_progress': False}
# Note for ScalingPolicy, we expect to get a metadata
- # update for the policy and autoscaling group, so pass nmeta=2
- for x in range(nmeta):
+ # update for the policy and autoscaling group, so pass nsignal=2
+ # Creating an autoscaling group also performs a metadata write, so
+ # expect 1 write when nsignal=0
+ for x in range(nsignal or 1):
resource.Resource.metadata_set(expected).AndReturn(None)
def test_scaling_delete_empty(self):
@@ -312,7 +330,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
@@ -321,6 +339,11 @@ class AutoScalingTest(HeatTestCase):
self.assertEqual(1, len(instance_names))
# Reduce the min size to 0, should complete without adjusting
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+ self._stub_meta_expected(now)
+ self.m.ReplayAll()
+
props = copy.copy(rsrc.properties.data)
props['MinSize'] = '0'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
@@ -348,7 +371,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -372,7 +395,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -399,7 +422,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -432,7 +455,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -462,7 +485,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -494,7 +517,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -524,7 +547,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -587,14 +610,19 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(1, len(rsrc.get_instance_names()))
instance_names = rsrc.get_instance_names()
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+
# Reduce the max size to 2, should complete without adjusting
+ self._stub_meta_expected(now)
+ self.m.ReplayAll()
props = copy.copy(rsrc.properties.data)
props['MaxSize'] = '2'
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
@@ -616,7 +644,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -649,7 +677,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -682,7 +710,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -715,15 +743,20 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
self.assertEqual(2, len(rsrc.get_instance_names()))
instance_names = rsrc.get_instance_names()
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+
# Remove DesiredCapacity from the updated template, which should
# have no effect, it's an optional parameter
+ self._stub_meta_expected(now)
+ self.m.ReplayAll()
props = copy.copy(rsrc.properties.data)
del props['DesiredCapacity']
update_snippet = rsrc_defn.ResourceDefinition(rsrc.name,
@@ -744,7 +777,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -785,7 +818,7 @@ class AutoScalingTest(HeatTestCase):
short_id.generate_id().AndReturn('aaaabbbbcccc')
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
stack = utils.parse_stack(t, params=self.params)
@@ -838,7 +871,7 @@ class AutoScalingTest(HeatTestCase):
mox.IgnoreArg()).AndReturn(None)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
stack = utils.parse_stack(t, params=self.params)
@@ -877,7 +910,7 @@ class AutoScalingTest(HeatTestCase):
properties['DesiredCapacity'] = '3'
self._stub_lb_reload(3)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 3')
+ self._stub_meta_expected(now, 'ExactCapacity : 3', 0)
self._stub_create(3)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -924,7 +957,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -955,7 +988,7 @@ class AutoScalingTest(HeatTestCase):
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -980,6 +1013,11 @@ class AutoScalingTest(HeatTestCase):
self.assertEqual(1, len(rsrc.get_instance_names()))
# no change
+ self.m.VerifyAll()
+ self.m.UnsetStubs()
+ self._stub_meta_expected(now)
+ self.m.ReplayAll()
+
rsrc.adjust(0)
self.assertEqual(1, len(rsrc.get_instance_names()))
@@ -997,7 +1035,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(2)
self._stub_create(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
@@ -1043,7 +1081,7 @@ class AutoScalingTest(HeatTestCase):
properties['Cooldown'] = '60'
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1055,6 +1093,13 @@ class AutoScalingTest(HeatTestCase):
self._stub_delete(1)
self.stub_ImageConstraint_validate(num=1)
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
+
+ self.m.StubOutWithMock(resource.Resource, 'metadata_get')
+ # Note: in reality the group create is counted as a cooldown event, so
+ # there would actually be metadata here and the scale up would not
+ # happen
+ resource.Resource.metadata_get().MultipleTimes().AndReturn({})
+
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(1, len(rsrc.get_instance_names()))
@@ -1064,8 +1109,8 @@ class AutoScalingTest(HeatTestCase):
# scaling group instances should be unchanged
# Note we have to stub Resource.metadata_get since up_policy isn't
# stored in the DB (because the stack hasn't really been created)
- previous_meta = {timeutils.strtime(now):
- 'PercentChangeInCapacity : -50'}
+ previous_meta = {'cooldown': {now.isoformat():
+ 'PercentChangeInCapacity : -50'}}
self.m.VerifyAll()
self.m.UnsetStubs()
@@ -1075,7 +1120,7 @@ class AutoScalingTest(HeatTestCase):
self.addCleanup(timeutils.clear_time_override)
self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- rsrc.metadata_get().AndReturn(previous_meta)
+ rsrc.metadata_get().MultipleTimes().AndReturn(previous_meta)
self.m.ReplayAll()
@@ -1096,7 +1141,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(2)
self._stub_create(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
stack.resources['WebServerGroup'] = rsrc
@@ -1107,6 +1152,13 @@ class AutoScalingTest(HeatTestCase):
self._stub_delete(1)
self.stub_ImageConstraint_validate(num=1)
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
+
+ self.m.StubOutWithMock(resource.Resource, 'metadata_get')
+ # Note: in reality the group create is counted as a cooldown event, so
+ # there would actually be metadata here and the scale up would not
+ # happen
+ resource.Resource.metadata_get().MultipleTimes().AndReturn({})
+
self.m.ReplayAll()
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(1, len(rsrc.get_instance_names()))
@@ -1114,8 +1166,9 @@ class AutoScalingTest(HeatTestCase):
# Now move time on 61 seconds - Cooldown in template is 60
# so this should update the policy metadata, and the
# scaling group instances updated
- previous_meta = {timeutils.strtime(now):
- 'PercentChangeInCapacity : -50'}
+ previous_meta = {'cooldown': {now.isoformat():
+ 'PercentChangeInCapacity : -50'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
@@ -1123,7 +1176,7 @@ class AutoScalingTest(HeatTestCase):
now = now + datetime.timedelta(seconds=61)
self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- rsrc.metadata_get().AndReturn(previous_meta)
+ rsrc.metadata_get().MultipleTimes().AndReturn(previous_meta)
#stub for the metadata accesses while creating the two instances
resource.Resource.metadata_get()
@@ -1149,7 +1202,7 @@ class AutoScalingTest(HeatTestCase):
properties['Cooldown'] = '0'
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1161,23 +1214,27 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'PercentChangeInCapacity : -50')
self._stub_delete(1)
self.stub_ImageConstraint_validate(num=1)
+
+ init_meta = {'cooldown': {now.isoformat(): 'ExactCapacity : -50'},
+ 'scaling_in_progress': False}
+ self.m.StubOutWithMock(resource.Resource, 'metadata_get')
+ resource.Resource.metadata_get().MultipleTimes().AndReturn(init_meta)
self.m.ReplayAll()
+
rsrc.adjust(-50, 'PercentChangeInCapacity')
self.assertEqual(1, len(rsrc.get_instance_names()))
# Don't move time, since cooldown is zero, it should work
- previous_meta = {timeutils.strtime(now):
- 'PercentChangeInCapacity : -50'}
+ previous_meta = {'cooldown': {now.isoformat():
+ 'PercentChangeInCapacity : -50'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- rsrc.metadata_get().AndReturn(previous_meta)
+ rsrc.metadata_get().MultipleTimes().AndReturn(previous_meta)
- #stub for the metadata accesses while creating the two instances
- resource.Resource.metadata_get()
- resource.Resource.metadata_get()
# raise by 200%, should work
self._stub_lb_reload(3, unset=False)
@@ -1213,7 +1270,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
@@ -1255,7 +1312,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
@@ -1298,7 +1355,7 @@ class AutoScalingTest(HeatTestCase):
properties['DesiredCapacity'] = '2'
self._stub_lb_reload(2)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 2')
+ self._stub_meta_expected(now, 'ExactCapacity : 2', 0)
self._stub_create(2)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1327,7 +1384,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1339,6 +1396,15 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+ up_policy = stack['WebServerScaleUpPolicy']
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn({})
+ # Note: in reality the group create is counted as a cooldown event, so
+ # there would actually be metadata here and the scale up would not
+ # happen
+ rsrc.metadata_get().MultipleTimes().AndReturn({})
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
@@ -1350,7 +1416,8 @@ class AutoScalingTest(HeatTestCase):
# scaling group instances should be unchanged
# Note we have to stub Resource.metadata_get since up_policy isn't
# stored in the DB (because the stack hasn't really been created)
- previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
+ prev_meta = {'cooldown': {now.isoformat(): 'ChangeInCapacity : 1'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
@@ -1359,8 +1426,9 @@ class AutoScalingTest(HeatTestCase):
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
- self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- up_policy.metadata_get().AndReturn(previous_meta)
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().AndReturn(prev_meta.copy())
self.m.ReplayAll()
up_policy.signal()
@@ -1376,7 +1444,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1388,24 +1456,33 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+ up_policy = stack['WebServerScaleUpPolicy']
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn({})
+ # Note: in reality the group create is counted as a cooldown event, so
+ # there would actually be metadata here and the scale up would not
+ # happen
+ rsrc.metadata_get().MultipleTimes().AndReturn({})
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
up_policy.signal()
self.assertEqual(2, len(rsrc.get_instance_names()))
- # Now move time on 61 seconds - Cooldown in template is 60
- # so this should trigger a scale-up
- previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
self.m.VerifyAll()
self.m.UnsetStubs()
- self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- up_policy.metadata_get().AndReturn(previous_meta)
- rsrc.metadata_get().AndReturn(previous_meta)
+ # Now move time on 61 seconds - Cooldown in template is 60
+ # so this should trigger a scale-up
+ prev_meta = {'cooldown': {now.isoformat(): 'ChangeInCapacity : 1'},
+ 'scaling_in_progress': False}
- #stub for the metadata accesses while creating the additional instance
- resource.Resource.metadata_get()
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
+ rsrc.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
now = now + datetime.timedelta(seconds=61)
self._stub_lb_reload(3, unset=False)
@@ -1431,7 +1508,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1449,16 +1526,15 @@ class AutoScalingTest(HeatTestCase):
self.assertEqual(2, len(rsrc.get_instance_names()))
# Now trigger another scale-up without changing time, should work
- previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
+ prev_meta = {'cooldown': {now.isoformat(): 'ChangeInCapacity : 1'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
- self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- up_policy.metadata_get().AndReturn(previous_meta)
- rsrc.metadata_get().AndReturn(previous_meta)
-
- #stub for the metadata accesses while creating the additional instance
- resource.Resource.metadata_get()
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
+ rsrc.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
@@ -1484,7 +1560,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
rsrc = self.create_scaling_group(t, stack, 'WebServerGroup')
@@ -1496,6 +1572,14 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+ init_meta = {'cooldown': {now.isoformat(): 'ExactCapacity : 1'},
+ 'scaling_in_progress': False}
+ up_policy = stack['WebServerScaleUpPolicy']
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn(init_meta.copy())
+ rsrc.metadata_get().MultipleTimes().AndReturn(init_meta.copy())
+
self.m.ReplayAll()
up_policy = self.create_scaling_policy(t, stack,
'WebServerScaleUpPolicy')
@@ -1503,16 +1587,15 @@ class AutoScalingTest(HeatTestCase):
self.assertEqual(2, len(rsrc.get_instance_names()))
# Now trigger another scale-up without changing time, should work
- previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
+ prev_meta = {'cooldown': {now.isoformat(): 'ChangeInCapacity : 1'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
- self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- up_policy.metadata_get().AndReturn(previous_meta)
- rsrc.metadata_get().AndReturn(previous_meta)
-
- #stub for the metadata accesses while creating the additional instance
- resource.Resource.metadata_get()
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
+ rsrc.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
self._stub_lb_reload(3, unset=False)
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
@@ -1532,7 +1615,7 @@ class AutoScalingTest(HeatTestCase):
# Create initial group
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
@@ -1549,6 +1632,14 @@ class AutoScalingTest(HeatTestCase):
self._stub_meta_expected(now, 'ChangeInCapacity : 1', 2)
self._stub_create(1)
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn({})
+ # Note: in reality the group create is counted as a cooldown event, so
+ # there would actually be metadata here and the scale up would not
+ # happen
+ rsrc.metadata_get().MultipleTimes().AndReturn({})
+
self.m.ReplayAll()
# Trigger alarm
@@ -1566,17 +1657,15 @@ class AutoScalingTest(HeatTestCase):
# Now move time on 61 seconds - Cooldown in template is 60
# so this should trigger a scale-up
- previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
+ prev_meta = {'cooldown': {now.isoformat(): 'ChangeInCapacity : 1'},
+ 'scaling_in_progress': False}
self.m.VerifyAll()
self.m.UnsetStubs()
- self.m.StubOutWithMock(resource.Resource, 'metadata_get')
- up_policy.metadata_get().AndReturn(previous_meta)
- rsrc.metadata_get().AndReturn(previous_meta)
-
- #stub for the metadata accesses while creating the two instances
- resource.Resource.metadata_get()
- resource.Resource.metadata_get()
+ self.m.StubOutWithMock(rsrc, 'metadata_get')
+ self.m.StubOutWithMock(up_policy, 'metadata_get')
+ up_policy.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
+ rsrc.metadata_get().MultipleTimes().AndReturn(prev_meta.copy())
now = now + datetime.timedelta(seconds=61)
@@ -1601,7 +1690,7 @@ class AutoScalingTest(HeatTestCase):
self._stub_lb_reload(1)
now = timeutils.utcnow()
- self._stub_meta_expected(now, 'ExactCapacity : 1')
+ self._stub_meta_expected(now, 'ExactCapacity : 1', 0)
self._stub_create(1)
self.m.ReplayAll()
diff --git a/heat/tests/test_heat_autoscaling_group.py b/heat/tests/test_heat_autoscaling_group.py
index 1e2cbc12f..12f8d6c10 100644
--- a/heat/tests/test_heat_autoscaling_group.py
+++ b/heat/tests/test_heat_autoscaling_group.py
@@ -386,7 +386,7 @@ class ScalingPolicyTest(HeatTestCase):
past = timeutils.strtime(timeutils.utcnow() -
datetime.timedelta(seconds=65))
- policy.metadata_set({past: 'ChangeInCapacity : 1'})
+ policy.metadata_set({'cooldown': {past: 'ChangeInCapacity : 1'}})
policy.signal()
self.assertEqual(3, len(group.get_instance_names()))