summary | refs | log | tree | commit | diff
path: root/ironic/conductor/task_manager.py
diff options
context:
space:
mode:
authorDmitry Tantsur <dtantsur@redhat.com>2015-07-16 15:28:50 +0200
committerDmitry Tantsur <dtantsur@redhat.com>2015-07-20 17:32:07 +0200
commit65f3a31b949cab31f2902b0c4f4239a605661bfa (patch)
tree1f0a42d9f3cf19463746682608597c55a146fdf0 /ironic/conductor/task_manager.py
parenta7e81f17bdcdafe980672a30bc8915974c060e09 (diff)
downloadironic-65f3a31b949cab31f2902b0c4f4239a605661bfa.tar.gz
Allow upgrading shared lock to an exclusive one
New task method upgrade_lock is a simpler and nicer way of doing: if task.shared: task = task_manager.acquire(task.context, task.node.id) This is useful for e.g. periodic tasks to start with a shared lock and upgrade it if any changes are needed. One more use case is to allow vendor passthru implementations to decide if they actually need an exclusive lock instead of just providing them it by default. Both use cases are not covered by this change. Change-Id: I2c019f2855b61330b5b18cee34daa5f399a41f2c
Diffstat (limited to 'ironic/conductor/task_manager.py')
-rw-r--r--  ironic/conductor/task_manager.py  |  56
1 files changed, 39 insertions, 17 deletions
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index e4ace6743..56c71e632 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -182,26 +182,11 @@ class TaskManager(object):
self.context = context
self.node = None
+ self.node_id = node_id
self.shared = shared
self.fsm = states.machine.copy()
self._purpose = purpose
- self._debug_timer = time.time()
-
- # NodeLocked exceptions can be annoying. Let's try to alleviate
- # some of that pain by retrying our lock attempts. The retrying
- # module expects a wait_fixed value in milliseconds.
- @retrying.retry(
- retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
- stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
- wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
- def reserve_node():
- self.node = objects.Node.reserve(context, CONF.host, node_id)
- LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
- "(took %(time).2f seconds)",
- {'node': node_id, 'purpose': purpose,
- 'time': time.time() - self._debug_timer})
- self._debug_timer = time.time()
try:
LOG.debug("Attempting to get %(type)s lock on node %(node)s (for "
@@ -209,8 +194,9 @@ class TaskManager(object):
{'type': 'shared' if shared else 'exclusive',
'node': node_id, 'purpose': purpose})
if not self.shared:
- reserve_node()
+ self._lock()
else:
+ self._debug_timer = time.time()
self.node = objects.Node.get(context, node_id)
self.ports = objects.Port.list_by_node_id(context, self.node.id)
self.driver = driver_factory.get_driver(driver_name or
@@ -228,6 +214,42 @@ class TaskManager(object):
with excutils.save_and_reraise_exception():
self.release_resources()
+ def _lock(self):
+ self._debug_timer = time.time()
+
+ # NodeLocked exceptions can be annoying. Let's try to alleviate
+ # some of that pain by retrying our lock attempts. The retrying
+ # module expects a wait_fixed value in milliseconds.
+ @retrying.retry(
+ retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
+ stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+ wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
+ def reserve_node():
+ self.node = objects.Node.reserve(self.context, CONF.host,
+ self.node_id)
+ LOG.debug("Node %(node)s successfully reserved for %(purpose)s "
+ "(took %(time).2f seconds)",
+ {'node': self.node_id, 'purpose': self._purpose,
+ 'time': time.time() - self._debug_timer})
+ self._debug_timer = time.time()
+
+ reserve_node()
+
+ def upgrade_lock(self):
+ """Upgrade a shared lock to an exclusive lock.
+
+ Also reloads node object from the database.
+ Does nothing if lock is already exclusive.
+ """
+ if self.shared:
+ LOG.debug('Upgrading shared lock on node %(uuid)s for %(purpose)s '
+ 'to an exclusive one (shared lock was held %(time).2f '
+ 'seconds)',
+ {'uuid': self.node.uuid, 'purpose': self._purpose,
+ 'time': time.time() - self._debug_timer})
+ self._lock()
+ self.shared = False
+
def spawn_after(self, _spawn_method, *args, **kwargs):
"""Call this to spawn a thread to complete the task.