author     Zane Bitter <zbitter@redhat.com>   2015-12-07 17:57:57 -0500
committer  Steve Baker <sbaker@redhat.com>    2016-01-11 14:34:04 +1300
commit     c0fa891ecfa60a11064a0c6dbe15850d4ac1f3d7 (patch)
tree       dacfa2ad34d4d4e5a49d9e44039c7a98e59c6b2b
parent     032715014563c689ad294e599cfb9fdd0278a607 (diff)
download   heat-c0fa891ecfa60a11064a0c6dbe15850d4ac1f3d7.tar.gz
Eliminate unnecessary sleeps during no-op update
For an update that involves e.g. stepping through the whole graph and verifying that nothing needs to be updated, we spend a lot of time sleeping unnecessarily. Every task will exit without yielding (i.e. it will be complete after calling TaskRunner.start()), yet the DependencyTaskGroup yields after each set of tasks, so the minimum sleep time in seconds is the maximum path length in the graph minus one.

This change fixes that by removing nodes from the graph as soon as they are done - i.e. immediately after being started, when the task completes without yielding. Since the _ready() call returns an iterator, any later tasks that were blocked only on this one can then start in the same pass. To ensure that tasks blocked only on this one _do_ appear later in the iteration, iterate over the graph in topologically sorted order.

The potential downside would be any case where actions complete quickly (i.e. without yielding) but we still need to throttle them. An obvious example is a resource type with no check_create_complete() function - creating many of these in a row could result in quota failures on the target API. However, the Resource.action_handler_task() task always yields at least once even if there is no check, so this patch should not change its behaviour.

Change-Id: I734561814d2784e710d0b9ec3ef7834f44f579b2
Closes-Bug: #1523303
(cherry picked from commit d66d57f187c7d8623526e9f928ae76d2cca9ecf9)
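As an illustration of the pattern (a minimal sketch with hypothetical names, not Heat's actual TaskRunner or DependencyTaskGroup classes), consider a group in which every task completes as soon as it is started, as in a no-op update. Visiting ready nodes in topological order and deleting a finished node from the graph at once lets its dependents become ready later in the same pass:

# Minimal sketch of the scheduling pattern (hypothetical names; every
# "task" here completes as soon as it is started, as in a no-op update).

def run_noop_update(keys, graph):
    """Run a group of instantly-completing tasks.

    keys:  node names in topological order (like self._keys).
    graph: dict mapping each key to the set of keys it still depends
           on (like self._graph).
    Returns the number of passes, each of which would end in a sleep.
    """
    passes = 0
    while graph:
        for k in keys:
            # Ready: still present in the graph with no outstanding deps.
            if not graph.get(k, True):
                # The task finished on start(), so remove its node right
                # away; dependents visited later in this pass see it gone.
                del graph[k]
                for deps in graph.values():
                    deps.discard(k)
        passes += 1
    return passes

# A three-node chain completes in a single pass; without the immediate
# removal it would take one pass (and one sleep) per level of the graph.
print(run_noop_update(['first', 'second', 'third'],
                      {'first': set(),
                       'second': {'first'},
                       'third': {'second'}}))  # -> 1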
-rw-r--r--  heat/engine/scheduler.py      9
-rw-r--r--  heat/tests/test_scheduler.py  2
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/heat/engine/scheduler.py b/heat/engine/scheduler.py
index c676d0755..555947548 100644
--- a/heat/engine/scheduler.py
+++ b/heat/engine/scheduler.py
@@ -352,7 +352,8 @@ class DependencyTaskGroup(object):
         of the error will be cancelled). Once all chains are complete, any
         errors will be rolled up into an ExceptionGroup exception.
         """
-        self._runners = dict((o, TaskRunner(task, o)) for o in dependencies)
+        self._keys = list(dependencies)
+        self._runners = dict((o, TaskRunner(task, o)) for o in self._keys)
         self._graph = dependencies.graph(reverse=reverse)
         self.error_wait_time = error_wait_time
         self.aggregate_exceptions = aggregate_exceptions
@@ -375,6 +376,8 @@
                 try:
                     for k, r in self._ready():
                         r.start()
+                        if not r:
+                            del self._graph[k]
 
                     yield
@@ -417,8 +420,8 @@
         Iterate over all subtasks that are ready to start - i.e. all their
         dependencies have been satisfied but they have not yet been started.
         """
-        for k, n in six.iteritems(self._graph):
-            if not n:
+        for k in self._keys:
+            if not self._graph.get(k, True):
                 runner = self._runners[k]
                 if runner and not runner.started():
                     yield k, runner
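The throttling argument from the commit message can likewise be sketched (a hypothetical simplification, not the real Resource.action_handler_task()): because the handler task yields at least once even when there is no check function, it is never complete immediately after start(), so back-to-back quick actions still cost one scheduler pass each:

# Simplified model (not Heat's real method) of why quick actions stay
# throttled: the task always yields once after calling the handler.

def action_task(handler, check=None):
    data = handler()
    # Always yield at least once, so the runner is not complete right
    # after start() and the node is not removed from the graph early.
    yield
    if check is not None:
        while not check(data):
            yield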
diff --git a/heat/tests/test_scheduler.py b/heat/tests/test_scheduler.py
index 16a620a8b..29ca090cb 100644
--- a/heat/tests/test_scheduler.py
+++ b/heat/tests/test_scheduler.py
@@ -220,7 +220,7 @@ class DependencyTaskGroupTest(common.HeatTestCase):
         self.steps = 0
         self.m.StubOutWithMock(scheduler.TaskRunner, '_sleep')
         with self._dep_test(('second', 'first')):
-            scheduler.TaskRunner._sleep(None).AndReturn(None)
+            pass
 
     def test_single_node(self):
         with self._dep_test(('only', None)) as dummy: