author     Jenkins <jenkins@review.openstack.org>     2014-06-11 01:05:36 +0000
committer  Gerrit Code Review <review@openstack.org>  2014-06-11 01:05:36 +0000
commit     8f120e1a21738fe40c968c3379382851ea6e4daf (patch)
tree       f0e15aadbd77135629b20f014d7b45effe7623d7
parent     be2543413914bceb5821507a1d554d2b7c172cca (diff)
parent     2b2022c331d78b5ca1df6d06a58d2229af3036b7 (diff)
Merge "TaskManager: Only support single node locking"
-rw-r--r--  ironic/common/exception.py                   |   4
-rw-r--r--  ironic/conductor/task_manager.py             | 171
-rw-r--r--  ironic/db/api.py                             |  27
-rw-r--r--  ironic/db/sqlalchemy/api.py                  |  96
-rw-r--r--  ironic/drivers/modules/pxe.py                |   2
-rw-r--r--  ironic/drivers/utils.py                      |   4
-rw-r--r--  ironic/tests/conductor/test_task_manager.py  | 497
-rw-r--r--  ironic/tests/db/test_nodes.py                | 146
-rw-r--r--  ironic/tests/db/test_ports.py                |   2
-rw-r--r--  ironic/tests/drivers/test_ipminative.py      |  24
-rw-r--r--  ironic/tests/drivers/test_ipmitool.py        |  30
-rw-r--r--  ironic/tests/drivers/test_pxe.py             |  78
-rw-r--r--  ironic/tests/drivers/test_seamicro.py        | 117
-rw-r--r--  ironic/tests/drivers/test_ssh.py             |  39
-rw-r--r--  ironic/tests/drivers/test_utils.py           |   2
15 files changed, 563 insertions, 676 deletions
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index 98b900059..89177ddc5 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -345,6 +345,10 @@ class NodeLocked(Conflict):
"after the current operation is completed.")
+class NodeNotLocked(Invalid):
+ message = _("Node %(node)s found not to be locked on release")
+
+
class NoFreeConductorWorker(TemporaryFailure):
message = _('Requested action cannot be performed due to lack of free '
'conductor workers.')
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 4affd8e9c..829cb0e57 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -16,21 +16,17 @@
# under the License.
"""
-A context manager to peform a series of tasks on a set of resources.
+A context manager to perform a series of tasks on a set of resources.
-:class:`TaskManager` is a context manager, created on-demand to synchronize
-locking and simplify operations across a set of :class:`NodeResource`
-instances. Each NodeResource holds the data model for a node and its
-associated ports, as well as references to the driver singleton appropriate for
-that node.
+:class:`TaskManager` is a context manager, created on-demand to allow
+synchronized access to a node and its resources.
The :class:`TaskManager` will, by default, acquire an exclusive lock on
-its resources for the duration that the TaskManager instance exists.
-You may create a TaskManager instance without locking by passing
-"shared=True" when creating it, but certain operations on the resources
-held by such an instance of TaskManager will not be possible. Requiring
-this exclusive lock guards against parallel operations interfering with
-each other.
+a node for the duration that the TaskManager instance exists. You may
+create a TaskManager instance without locking by passing "shared=True"
+when creating it, but certain operations on the resources held by such
+an instance of TaskManager will not be possible. Requiring this exclusive
+lock guards against parallel operations interfering with each other.
A shared lock is useful when performing non-interfering operations,
such as validating the driver interfaces or the vendor_passthru method.
@@ -42,13 +38,21 @@ different hosts.
:class:`TaskManager` methods, as well as driver methods, may be decorated to
determine whether their invocation requires an exclusive lock.
-If you have a task with just a single node, the TaskManager instance
-exposes additional properties to access the node, driver, and ports
-in a short-hand fashion. For example:
+The TaskManager instance exposes certain node resources and properties as
+attributes that you may access:
+
+ task.context -- The context passed to TaskManager()
+ task.shared -- False if Node is locked, True if it is not locked. (The
+ 'shared' kwarg arg of TaskManager())
+ task.node -- The Node object
+ task.ports -- Ports belonging to the Node
+ task.driver -- The Driver for the Node, or the Driver based on the
+ 'driver_name' kwarg of TaskManager().
+
+Example usage:
with task_manager.acquire(context, node_id) as task:
- driver = task.node.driver
- driver.power.power_on(task.node)
+ task.driver.power.power_on(task.node)
If you need to execute task-requiring code in the background thread, the
TaskManager instance provides an interface to handle this for you, making
@@ -93,77 +97,67 @@ def require_exclusive_lock(f):
return wrapper
-def acquire(context, node_ids, shared=False, driver_name=None):
- """Shortcut for acquiring a lock on one or more Nodes.
+def acquire(context, node_id, shared=False, driver_name=None):
+ """Shortcut for acquiring a lock on a Node.
:param context: Request context.
- :param node_ids: A list of ids or uuids of nodes to lock.
+ :param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: Name of Driver. Default: None.
:returns: An instance of :class:`TaskManager`.
"""
- return TaskManager(context, node_ids, shared, driver_name)
+ return TaskManager(context, node_id, shared=shared,
+ driver_name=driver_name)
class TaskManager(object):
"""Context manager for tasks.
This class wraps the locking, driver loading, and acquisition
- of related resources (eg, Nodes and Ports) when beginning a unit of work.
+ of related resources (eg, Node and Ports) when beginning a unit of work.
"""
- def __init__(self, context, node_ids, shared=False, driver_name=None):
+ def __init__(self, context, node_id, shared=False, driver_name=None):
"""Create a new TaskManager.
- Acquire a lock atomically on a non-empty set of nodes. The lock
- can be either shared or exclusive. Shared locks may be used for
- read-only or non-disruptive actions only, and must be considerate
- to what other threads may be doing on the nodes at the same time.
+ Acquire a lock on a node. The lock can be either shared or
+ exclusive. Shared locks may be used for read-only or
+ non-disruptive actions only, and must be considerate to what
+ other threads may be doing on the same node at the same time.
:param context: request context
- :param node_ids: A list of ids or uuids of nodes to lock.
+ :param node_id: ID or UUID of node to lock.
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param driver_name: The name of the driver to load, if different
from the Node's current driver.
:raises: DriverNotFound
- :raises: NodeAlreadyLocked
+ :raises: NodeNotFound
+ :raises: NodeLocked
"""
- self.context = context
- self.resources = []
- self.shared = shared
- self.dbapi = dbapi.get_instance()
+ self._dbapi = dbapi.get_instance()
self._spawn_method = None
- # instead of generating an exception, DTRT and convert to a list
- if not isinstance(node_ids, list):
- node_ids = [node_ids]
+ self.context = context
+ self.node = None
+ self.shared = shared
- locked_node_list = []
try:
- for id in node_ids:
- if not self.shared:
- # NOTE(deva): Only lock one node at a time so we can ensure
- # that only the right nodes are unlocked.
- # However, reserve_nodes takes and returns a
- # list. This should be refactored.
- node = self.dbapi.reserve_nodes(CONF.host, [id])[0]
- locked_node_list.append(node.id)
- else:
- node = objects.Node.get(context, id)
- ports = self.dbapi.get_ports_by_node_id(node.id)
- driver = driver_factory.get_driver(driver_name or node.driver)
-
- self.resources.append(NodeResource(node, ports, driver))
+ if not self.shared:
+ self.node = self._dbapi.reserve_node(CONF.host, node_id)
+ else:
+ self.node = objects.Node.get(context, node_id)
+ self.ports = self._dbapi.get_ports_by_node_id(self.node.id)
+ self.driver = driver_factory.get_driver(driver_name or
+ self.node.driver)
except Exception:
with excutils.save_and_reraise_exception():
- if locked_node_list:
- self.dbapi.release_nodes(CONF.host, locked_node_list)
+ self.release_resources()
def spawn_after(self, _spawn_method, *args, **kwargs):
"""Call this to spawn a thread to complete the task."""
@@ -172,61 +166,29 @@ class TaskManager(object):
self._spawn_kwargs = kwargs
def release_resources(self):
- """Release any resources for which this TaskManager
- was holding an exclusive lock.
+ """Unlock a node and release resources.
+
+ If an exclusive lock is held, unlock the node. Reset attributes
+ to make it clear that this instance of TaskManager should no
+ longer be accessed.
"""
if not self.shared:
- if self.resources:
- node_ids = [r.node.id for r in self.resources]
- try:
- self.dbapi.release_nodes(CONF.host, node_ids)
- except exception.NodeNotFound:
- # squelch the exception if the node was deleted
- # within the task's context.
- pass
- self.resources = []
+ try:
+ if self.node:
+ self._dbapi.release_node(CONF.host, self.node.id)
+ except exception.NodeNotFound:
+ # squelch the exception if the node was deleted
+ # within the task's context.
+ pass
+ self.node = None
+ self.driver = None
+ self.ports = None
def _thread_release_resources(self, t):
"""Thread.link() callback to release resources."""
self.release_resources()
- @property
- def node(self):
- """Special accessor for single-node tasks."""
- if len(self.resources) == 1:
- return self.resources[0].node
- else:
- raise AttributeError(_("Multi-node TaskManager "
- "has no attribute 'node'"))
-
- @property
- def ports(self):
- """Special accessor for single-node tasks."""
- if len(self.resources) == 1:
- return self.resources[0].ports
- else:
- raise AttributeError(_("Multi-node TaskManager "
- "has no attribute 'ports'"))
-
- @property
- def driver(self):
- """Special accessor for single-node tasks."""
- if len(self.resources) == 1:
- return self.resources[0].driver
- else:
- raise AttributeError(_("Multi-node TaskManager "
- "has no attribute 'driver'"))
-
- @property
- def node_manager(self):
- """Special accessor for single-node manager."""
- if len(self.resources) == 1:
- return self.resources[0]
- else:
- raise AttributeError(_("Multi-node TaskManager "
- "can't select single node manager from the list"))
-
def __enter__(self):
return self
@@ -262,12 +224,3 @@ class TaskManager(object):
thread.cancel()
self.release_resources()
self.release_resources()
-
-
-class NodeResource(object):
- """Wrapper to hold a Node, its associated Port(s), and its Driver."""
-
- def __init__(self, node, ports, driver):
- self.node = node
- self.ports = ports
- self.driver = driver
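
The module docstring above describes the new single-node API: acquire() takes one node id, and the task exposes the node, its ports, and its driver directly. A minimal usage sketch of that API (not part of this commit; it assumes a working conductor environment, and node_id and spawn_worker are placeholders):

    from ironic.conductor import task_manager

    def power_on_node(context, node_id):
        # acquire() now locks exactly one node; the task exposes the node,
        # its ports and its driver directly instead of task.resources[0].
        with task_manager.acquire(context, node_id) as task:
            task.driver.power.power_on(task.node)

    def reboot_node_async(context, node_id, spawn_worker):
        # spawn_worker is a placeholder for something like a GreenPool's
        # spawn(); spawn_after() records it, __exit__ invokes it, and the
        # node lock is released by the thread's link() callback instead.
        with task_manager.acquire(context, node_id) as task:
            task.spawn_after(spawn_worker, task.driver.power.reboot, task)

The second sketch mirrors how conductor code hands work to a pool: the exclusive lock outlives __exit__ and is only dropped when the spawned thread finishes.
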
diff --git a/ironic/db/api.py b/ironic/db/api.py
index 90f406476..ad4351bc8 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -95,28 +95,29 @@ class Connection(object):
"""
@abc.abstractmethod
- def reserve_nodes(self, tag, nodes):
- """Reserve a set of nodes atomically.
+ def reserve_node(self, tag, node_id):
+ """Reserve a node.
To prevent other ManagerServices from manipulating the given
- Nodes while a Task is performed, mark them all reserved by this host.
+ Node while a Task is performed, mark it reserved by this host.
:param tag: A string uniquely identifying the reservation holder.
- :param nodes: A list of node id or uuid.
- :returns: A list of the reserved node refs.
- :raises: NodeNotFound if any node is not found.
- :raises: NodeAlreadyReserved if any node is already reserved.
+ :param node_id: A node id or uuid.
+ :returns: A Node object.
+ :raises: NodeNotFound if the node is not found.
+ :raises: NodeLocked if the node is already reserved.
"""
@abc.abstractmethod
- def release_nodes(self, tag, nodes):
- """Release the reservation on a set of nodes atomically.
+ def release_node(self, tag, node_id):
+ """Release the reservation on a node.
:param tag: A string uniquely identifying the reservation holder.
- :param nodes: A list of node id or uuid.
- :raises: NodeNotFound if any node is not found.
- :raises: NodeAlreadyReserved if any node could not be released
- because it was not reserved by this host.
+ :param node_id: A node id or uuid.
+ :raises: NodeNotFound if the node is not found.
+ :raises: NodeLocked if the node is reserved by another host.
+ :raises: NodeNotLocked if the node was found to not have a
+ reservation at all.
"""
@abc.abstractmethod
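
The abstract contract above replaces the list-based reserve_nodes()/release_nodes() pair. A caller-side sketch of the intended flow (illustrative only; the 'test-host' tag and the error handling strategy are placeholders, not code from this change):

    from ironic.common import exception
    from ironic.db import api as dbapi

    def do_exclusive_work(node_id, tag='test-host'):
        db = dbapi.get_instance()
        # reserve_node() raises NodeNotFound or NodeLocked per the docstring.
        node = db.reserve_node(tag, node_id)
        try:
            pass  # ... perform work that needs the exclusive reservation ...
        finally:
            try:
                # release_node() raises NodeLocked if another host holds the
                # node, or NodeNotLocked if the reservation is already gone.
                db.release_node(tag, node.id)
            except exception.NodeNotFound:
                # The node was deleted while reserved; nothing to unlock.
                pass
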
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index b62c576f6..8a17865a9 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -107,28 +107,6 @@ def add_identity_filter(query, value):
raise exception.InvalidIdentity(identity=value)
-def add_filter_by_many_identities(query, model, values):
- """Adds an identity filter to a query for values list.
-
- Filters results by ID, if supplied values contain a valid integer.
- Otherwise attempts to filter results by UUID.
-
- :param query: Initial query to add filter to.
- :param model: Model for filter.
- :param values: Values for filtering results by.
- :return: tuple (Modified query, filter field name).
- """
- if not values:
- raise exception.InvalidIdentity(identity=values)
- value = values[0]
- if utils.is_int_like(value):
- return query.filter(getattr(model, 'id').in_(values)), 'id'
- elif utils.is_uuid_like(value):
- return query.filter(getattr(model, 'uuid').in_(values)), 'uuid'
- else:
- raise exception.InvalidIdentity(identity=value)
-
-
def add_port_filter(query, value):
"""Adds a port-specific filter to a query.
@@ -186,21 +164,6 @@ def _paginate_query(model, limit=None, marker=None, sort_key=None,
return query.all()
-def _check_node_already_locked(query, query_by):
- no_reserv = None
- locked_ref = query.filter(models.Node.reservation != no_reserv).first()
- if locked_ref:
- raise exception.NodeLocked(node=locked_ref[query_by],
- host=locked_ref['reservation'])
-
-
-def _handle_node_lock_not_found(nodes, query, query_by):
- refs = query.all()
- existing = [ref[query_by] for ref in refs]
- missing = set(nodes) - set(existing)
- raise exception.NodeNotFound(node=missing.pop())
-
-
class Connection(api.Connection):
"""SqlAlchemy connection."""
@@ -261,44 +224,43 @@ class Connection(api.Connection):
sort_key, sort_dir, query)
@objects.objectify(objects.Node)
- def reserve_nodes(self, tag, nodes):
- # assume nodes does not contain duplicates
- # Ensure consistent sort order so we don't run into deadlocks.
- nodes.sort()
+ def reserve_node(self, tag, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
- query, query_by = add_filter_by_many_identities(query, models.Node,
- nodes)
- # Be optimistic and assume we usually get a reservation.
- _check_node_already_locked(query, query_by)
- count = query.update({'reservation': tag},
- synchronize_session=False)
-
- if count != len(nodes):
- # one or more node id not found
- _handle_node_lock_not_found(nodes, query, query_by)
-
- return query.all()
+ query = add_identity_filter(query, node_id)
+ # be optimistic and assume we usually create a reservation
+ count = query.filter_by(reservation=None).update(
+ {'reservation': tag}, synchronize_session=False)
+ try:
+ node = query.one()
+ if count != 1:
+ # Nothing updated and node exists. Must already be
+ # locked.
+ raise exception.NodeLocked(node=node_id,
+ host=node['reservation'])
+ return node
+ except NoResultFound:
+ raise exception.NodeNotFound(node_id)
- def release_nodes(self, tag, nodes):
- # assume nodes does not contain duplicates
+ def release_node(self, tag, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
- query, query_by = add_filter_by_many_identities(query, models.Node,
- nodes)
+ query = add_identity_filter(query, node_id)
# be optimistic and assume we usually release a reservation
- count = query.filter_by(reservation=tag).\
- update({'reservation': None}, synchronize_session=False)
- if count != len(nodes):
- # we updated not all nodes
- if len(nodes) != query.count():
- # one or more node id not found
- _handle_node_lock_not_found(nodes, query, query_by)
- else:
- # one or more node had reservation != tag
- _check_node_already_locked(query, query_by)
+ count = query.filter_by(reservation=tag).update(
+ {'reservation': None}, synchronize_session=False)
+ try:
+ if count != 1:
+ node = query.one()
+ if node['reservation'] is None:
+ raise exception.NodeNotLocked(node=node_id)
+ else:
+ raise exception.NodeLocked(node=node_id,
+ host=node['reservation'])
+ except NoResultFound:
+ raise exception.NodeNotFound(node_id)
def create_node(self, values):
# ensure defaults are present for new nodes
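
The reserve_node()/release_node() bodies above rely on an optimistic compare-and-swap: issue the UPDATE filtered on the expected reservation value, then use the affected row count to decide which exception to raise. A self-contained sketch of the same pattern, not ironic's code (plain SQLAlchemy 1.4+, in-memory SQLite, generic exceptions instead of ironic's):

    import sqlalchemy as sa
    from sqlalchemy import orm

    Base = orm.declarative_base()

    class Node(Base):
        __tablename__ = 'nodes'
        id = sa.Column(sa.Integer, primary_key=True)
        reservation = sa.Column(sa.String, nullable=True)

    def reserve_node(session, tag, node_id):
        query = session.query(Node).filter_by(id=node_id)
        # Claim the row only if nobody holds it; the row count says if we won.
        count = query.filter_by(reservation=None).update(
            {'reservation': tag}, synchronize_session=False)
        node = query.one()            # NoResultFound here means "no such node"
        if count != 1:
            raise RuntimeError('already locked by %s' % node.reservation)
        return node

    def release_node(session, tag, node_id):
        query = session.query(Node).filter_by(id=node_id)
        count = query.filter_by(reservation=tag).update(
            {'reservation': None}, synchronize_session=False)
        if count != 1:
            node = query.one()        # NoResultFound here means "no such node"
            if node.reservation is None:
                raise RuntimeError('node was not locked')
            raise RuntimeError('locked by %s, not by us' % node.reservation)

    if __name__ == '__main__':
        engine = sa.create_engine('sqlite://')
        Base.metadata.create_all(engine)
        with orm.Session(engine) as session, session.begin():
            session.add(Node(id=1))
        with orm.Session(engine) as session, session.begin():
            reserve_node(session, 'host-a', 1)
            release_node(session, 'host-a', 1)

Because the check happens in SQL rather than in Python, two hosts racing for the same node cannot both see count == 1; one of them loses the UPDATE and raises.
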
diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py
index f0f227c27..270f9ff22 100644
--- a/ironic/drivers/modules/pxe.py
+++ b/ironic/drivers/modules/pxe.py
@@ -200,7 +200,7 @@ def _get_node_vif_ids(task):
"""
port_vifs = {}
- for port in task.resources[0].ports:
+ for port in task.ports:
vif = port.extra.get('vif_port_id')
if vif:
port_vifs[port.uuid] = vif
diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py
index b77867fa4..d0435fc81 100644
--- a/ironic/drivers/utils.py
+++ b/ironic/drivers/utils.py
@@ -92,6 +92,4 @@ def get_node_mac_addresses(task):
:param task: a TaskManager instance containing the node to act on.
:returns: A list of MAC addresses in the format xx:xx:xx:xx:xx:xx.
"""
- for r in task.resources:
- if r.node.id == task.node.id:
- return [p.address for p in r.ports]
+ return [p.address for p in task.ports]
diff --git a/ironic/tests/conductor/test_task_manager.py b/ironic/tests/conductor/test_task_manager.py
index 230e708be..1643348e4 100644
--- a/ironic/tests/conductor/test_task_manager.py
+++ b/ironic/tests/conductor/test_task_manager.py
@@ -17,169 +17,256 @@
"""Tests for :class:`ironic.conductor.task_manager`."""
-from testtools import matchers
-
import eventlet
from eventlet import greenpool
import mock
from ironic.common import driver_factory
from ironic.common import exception
-from ironic.common import utils as ironic_utils
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic import objects
-from ironic.openstack.common import context
-
from ironic.tests import base as tests_base
-from ironic.tests.conductor import utils as mgr_utils
-from ironic.tests.db import base as db_base
-from ironic.tests.objects import utils as obj_utils
-def create_fake_node(ctxt, i):
- node = obj_utils.create_test_node(ctxt,
- id=i,
- uuid=ironic_utils.generate_uuid())
- return node.uuid
+@mock.patch.object(objects.Node, 'get')
+@mock.patch.object(dbapi.IMPL, 'release_node')
+@mock.patch.object(dbapi.IMPL, 'reserve_node')
+@mock.patch.object(driver_factory, 'get_driver')
+@mock.patch.object(dbapi.IMPL, 'get_ports_by_node_id')
+class TaskManagerTestCase(tests_base.TestCase):
+ def setUp(self):
+ super(TaskManagerTestCase, self).setUp()
+ self.host = 'test-host'
+ self.config(host=self.host)
+ self.context = mock.sentinel.context
+ self.node = mock.Mock(spec_set=objects.Node)
+
+ def test_excl_lock(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock, node_get_mock):
+ reserve_mock.return_value = self.node
+ with task_manager.TaskManager(self.context, 'fake-node-id') as task:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(get_ports_mock.return_value, task.ports)
+ self.assertEqual(get_driver_mock.return_value, task.driver)
+ self.assertFalse(task.shared)
+
+ reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with(self.node.driver)
+ release_mock.assert_called_once_with(self.host, self.node.id)
+ self.assertFalse(node_get_mock.called)
+
+ def test_excl_lock_with_driver(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
+ reserve_mock.return_value = self.node
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ driver_name='fake-driver') as task:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(get_ports_mock.return_value, task.ports)
+ self.assertEqual(get_driver_mock.return_value, task.driver)
+ self.assertFalse(task.shared)
+
+ reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with('fake-driver')
+ release_mock.assert_called_once_with(self.host, self.node.id)
+ self.assertFalse(node_get_mock.called)
+
+ def test_excl_nested_acquire(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
+ node2 = mock.Mock(spec_set=objects.Node)
+
+ reserve_mock.return_value = self.node
+ get_ports_mock.return_value = mock.sentinel.ports1
+ get_driver_mock.return_value = mock.sentinel.driver1
+
+ with task_manager.TaskManager(self.context, 'node-id1') as task:
+ reserve_mock.return_value = node2
+ get_ports_mock.return_value = mock.sentinel.ports2
+ get_driver_mock.return_value = mock.sentinel.driver2
+ with task_manager.TaskManager(self.context, 'node-id2') as task2:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(mock.sentinel.ports1, task.ports)
+ self.assertEqual(mock.sentinel.driver1, task.driver)
+ self.assertFalse(task.shared)
+ self.assertEqual(self.context, task2.context)
+ self.assertEqual(node2, task2.node)
+ self.assertEqual(mock.sentinel.ports2, task2.ports)
+ self.assertEqual(mock.sentinel.driver2, task2.driver)
+ self.assertFalse(task2.shared)
+
+ self.assertEqual([mock.call(self.host, 'node-id1'),
+ mock.call(self.host, 'node-id2')],
+ reserve_mock.call_args_list)
+ self.assertEqual([mock.call(self.node.id), mock.call(node2.id)],
+ get_ports_mock.call_args_list)
+ self.assertEqual([mock.call(self.node.driver),
+ mock.call(node2.driver)],
+ get_driver_mock.call_args_list)
+ # release should be in reverse order
+ self.assertEqual([mock.call(self.host, node2.id),
+ mock.call(self.host, self.node.id)],
+ release_mock.call_args_list)
+ self.assertFalse(node_get_mock.called)
+
+ def test_excl_lock_reserve_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ reserve_mock.side_effect = exception.NodeLocked(node='foo',
+ host='foo')
+ self.assertRaises(exception.NodeLocked,
+ task_manager.TaskManager,
+ self.context,
+ 'fake-node-id')
-def ContainsUUIDs(uuids):
- def _task_uuids(task):
- return sorted([r.node.uuid for r in task.resources])
+ reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
+ self.assertFalse(get_ports_mock.called)
+ self.assertFalse(get_driver_mock.called)
+ self.assertFalse(release_mock.called)
+ self.assertFalse(node_get_mock.called)
- return matchers.AfterPreprocessing(
- _task_uuids, matchers.Equals(uuids))
+ def test_excl_lock_get_ports_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ reserve_mock.return_value = self.node
+ get_ports_mock.side_effect = exception.IronicException('foo')
+ self.assertRaises(exception.IronicException,
+ task_manager.TaskManager,
+ self.context,
+ 'fake-node-id')
-class TaskManagerSetup(db_base.DbTestCase):
+ reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ self.assertFalse(get_driver_mock.called)
+ release_mock.assert_called_once_with(self.host, self.node.id)
+ self.assertFalse(node_get_mock.called)
- def setUp(self):
- super(TaskManagerSetup, self).setUp()
- self.dbapi = dbapi.get_instance()
- self.context = context.get_admin_context()
- mgr_utils.mock_the_extension_manager()
- self.driver = driver_factory.get_driver("fake")
- self.config(host='test-host')
+ def test_excl_lock_get_driver_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ reserve_mock.return_value = self.node
+ get_driver_mock.side_effect = exception.DriverNotFound(
+ driver_name='foo')
+ self.assertRaises(exception.DriverNotFound,
+ task_manager.TaskManager,
+ self.context,
+ 'fake-node-id')
+
+ reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with(self.node.driver)
+ release_mock.assert_called_once_with(self.host, self.node.id)
+ self.assertFalse(node_get_mock.called)
+
+ def test_shared_lock(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock, node_get_mock):
+ node_get_mock.return_value = self.node
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ shared=True) as task:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(get_ports_mock.return_value, task.ports)
+ self.assertEqual(get_driver_mock.return_value, task.driver)
+ self.assertTrue(task.shared)
+
+ self.assertFalse(reserve_mock.called)
+ self.assertFalse(release_mock.called)
+ node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with(self.node.driver)
+
+ def test_shared_lock_with_driver(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
+ node_get_mock.return_value = self.node
+ with task_manager.TaskManager(self.context,
+ 'fake-node-id',
+ shared=True,
+ driver_name='fake-driver') as task:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(get_ports_mock.return_value, task.ports)
+ self.assertEqual(get_driver_mock.return_value, task.driver)
+ self.assertTrue(task.shared)
+
+ self.assertFalse(reserve_mock.called)
+ self.assertFalse(release_mock.called)
+ node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with('fake-driver')
-class TaskManagerTestCase(TaskManagerSetup):
+ def test_shared_lock_node_get_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ node_get_mock.side_effect = exception.NodeNotFound(node='foo')
- def setUp(self):
- super(TaskManagerTestCase, self).setUp()
- self.uuids = [create_fake_node(self.context, i) for i in range(1, 6)]
- self.uuids.sort()
-
- def test_task_manager_gets_node(self):
- node_uuid = self.uuids[0]
- task = task_manager.TaskManager(self.context, node_uuid)
- self.assertEqual(node_uuid, task.node.uuid)
-
- def test_task_manager_updates_db(self):
- node_uuid = self.uuids[0]
- node = objects.Node.get_by_uuid(self.context, node_uuid)
- self.assertIsNone(node.reservation)
-
- with task_manager.acquire(self.context, node_uuid) as task:
- self.assertEqual(node.uuid, task.node.uuid)
- node.refresh(self.context)
- self.assertEqual('test-host', node.reservation)
-
- node.refresh(self.context)
- self.assertIsNone(node.reservation)
-
- def test_get_many_nodes(self):
- uuids = self.uuids[1:3]
-
- with task_manager.acquire(self.context, uuids) as task:
- self.assertThat(task, ContainsUUIDs(uuids))
- for node in [r.node for r in task.resources]:
- self.assertEqual('test-host', node.reservation)
-
- # Ensure all reservations are cleared
- for uuid in self.uuids:
- node = objects.Node.get_by_uuid(self.context, uuid)
- self.assertIsNone(node.reservation)
-
- def test_get_nodes_nested(self):
- uuids = self.uuids[0:2]
- more_uuids = self.uuids[3:4]
-
- with task_manager.acquire(self.context, uuids) as task:
- self.assertThat(task, ContainsUUIDs(uuids))
- with task_manager.acquire(self.context,
- more_uuids) as another_task:
- self.assertThat(another_task, ContainsUUIDs(more_uuids))
-
- def test_get_shared_lock(self):
- uuids = self.uuids[0:2]
-
- # confirm we can elevate from shared -> exclusive
- with task_manager.acquire(self.context, uuids, shared=True) as task:
- self.assertThat(task, ContainsUUIDs(uuids))
- with task_manager.acquire(self.context, uuids,
- shared=False) as inner_task:
- self.assertThat(inner_task, ContainsUUIDs(uuids))
-
- # confirm someone else can still get a shared lock
- with task_manager.acquire(self.context, uuids, shared=False) as task:
- self.assertThat(task, ContainsUUIDs(uuids))
- with task_manager.acquire(self.context, uuids,
- shared=True) as inner_task:
- self.assertThat(inner_task, ContainsUUIDs(uuids))
-
- def test_get_one_node_already_locked(self):
- node_uuid = self.uuids[0]
- task_manager.TaskManager(self.context, node_uuid)
-
- # Check that db node reservation is still set
- # if another TaskManager attempts to acquire the same node
- self.assertRaises(exception.NodeLocked,
+ self.assertRaises(exception.NodeNotFound,
task_manager.TaskManager,
- self.context, node_uuid)
- node = objects.Node.get_by_uuid(self.context, node_uuid)
- self.assertEqual('test-host', node.reservation)
+ self.context,
+ 'fake-node-id',
+ shared=True)
+
+ self.assertFalse(reserve_mock.called)
+ self.assertFalse(release_mock.called)
+ node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
+ self.assertFalse(get_ports_mock.called)
+ self.assertFalse(get_driver_mock.called)
- def test_get_many_nodes_some_already_locked(self):
- unlocked_node_uuids = self.uuids[0:2] + self.uuids[3:5]
- locked_node_uuid = self.uuids[2]
- task_manager.TaskManager(self.context, locked_node_uuid)
+ def test_shared_lock_get_ports_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ node_get_mock.return_value = self.node
+ get_ports_mock.side_effect = exception.IronicException('foo')
- # Check that none of the other nodes are reserved
- # and the one which we first locked has not been unlocked
- self.assertRaises(exception.NodeLocked,
+ self.assertRaises(exception.IronicException,
task_manager.TaskManager,
self.context,
- self.uuids)
- node = objects.Node.get_by_uuid(self.context, locked_node_uuid)
- self.assertEqual('test-host', node.reservation)
- for uuid in unlocked_node_uuids:
- node = objects.Node.get_by_uuid(self.context, uuid)
- self.assertIsNone(node.reservation)
-
- def test_get_one_node_driver_load_exception(self):
- node_uuid = self.uuids[0]
+ 'fake-node-id',
+ shared=True)
+
+ self.assertFalse(reserve_mock.called)
+ self.assertFalse(release_mock.called)
+ node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ self.assertFalse(get_driver_mock.called)
+
+ def test_shared_lock_get_driver_exception(self, get_ports_mock,
+ get_driver_mock, reserve_mock,
+ release_mock, node_get_mock):
+ node_get_mock.return_value = self.node
+ get_driver_mock.side_effect = exception.DriverNotFound(
+ driver_name='foo')
+
self.assertRaises(exception.DriverNotFound,
task_manager.TaskManager,
- self.context, node_uuid,
- driver_name='no-such-driver')
-
- # Check that db node reservation is not set.
- node = objects.Node.get_by_uuid(self.context, node_uuid)
- self.assertIsNone(node.reservation)
-
- @mock.patch.object(driver_factory, 'get_driver')
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_node_id')
- @mock.patch.object(dbapi.IMPL, 'reserve_nodes')
- def test_spawn_after(self, reserve_mock, get_ports_mock,
- get_driver_mock):
+ self.context,
+ 'fake-node-id',
+ shared=True)
+
+ self.assertFalse(reserve_mock.called)
+ self.assertFalse(release_mock.called)
+ node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
+ get_ports_mock.assert_called_once_with(self.node.id)
+ get_driver_mock.assert_called_once_with(self.node.driver)
+
+ def test_spawn_after(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock, node_get_mock):
thread_mock = mock.Mock(spec_set=['link', 'cancel'])
spawn_mock = mock.Mock(return_value=thread_mock)
- release_mock = mock.Mock()
+ task_release_mock = mock.Mock()
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
- task.release_resources = release_mock
+ task.release_resources = task_release_mock
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
thread_mock.link.assert_called_once_with(
@@ -188,139 +275,105 @@ class TaskManagerTestCase(TaskManagerSetup):
# Since we mocked link(), we're testing that __exit__ didn't
# release resources pending the finishing of the background
# thread
- self.assertFalse(release_mock.called)
+ self.assertFalse(task_release_mock.called)
- @mock.patch.object(driver_factory, 'get_driver')
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_node_id')
- @mock.patch.object(dbapi.IMPL, 'reserve_nodes')
- def test_spawn_after_exception_while_yielded(self, reserve_mock,
- get_ports_mock,
- get_driver_mock):
+ def test_spawn_after_exception_while_yielded(self, get_ports_mock,
+ get_driver_mock,
+ reserve_mock,
+ release_mock,
+ node_get_mock):
spawn_mock = mock.Mock()
- release_mock = mock.Mock()
+ task_release_mock = mock.Mock()
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
- task.release_resources = release_mock
+ task.release_resources = task_release_mock
raise exception.IronicException('foo')
self.assertRaises(exception.IronicException, _test_it)
self.assertFalse(spawn_mock.called)
- release_mock.assert_called_once_with()
+ task_release_mock.assert_called_once_with()
- @mock.patch.object(driver_factory, 'get_driver')
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_node_id')
- @mock.patch.object(dbapi.IMPL, 'reserve_nodes')
- def test_spawn_after_spawn_fails(self, reserve_mock, get_ports_mock,
- get_driver_mock):
+ def test_spawn_after_spawn_fails(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
spawn_mock = mock.Mock(side_effect=exception.IronicException('foo'))
- release_mock = mock.Mock()
+ task_release_mock = mock.Mock()
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
- task.release_resources = release_mock
+ task.release_resources = task_release_mock
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
- release_mock.assert_called_once_with()
+ task_release_mock.assert_called_once_with()
- @mock.patch.object(driver_factory, 'get_driver')
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_node_id')
- @mock.patch.object(dbapi.IMPL, 'reserve_nodes')
- def test_spawn_after_link_fails(self, reserve_mock, get_ports_mock,
- get_driver_mock):
+ def test_spawn_after_link_fails(self, get_ports_mock, get_driver_mock,
+ reserve_mock, release_mock,
+ node_get_mock):
thread_mock = mock.Mock(spec_set=['link', 'cancel'])
thread_mock.link.side_effect = exception.IronicException('foo')
spawn_mock = mock.Mock(return_value=thread_mock)
- release_mock = mock.Mock()
+ task_release_mock = mock.Mock()
thr_release_mock = mock.Mock(spec_set=[])
def _test_it():
with task_manager.TaskManager(self.context, 'node-id') as task:
task.spawn_after(spawn_mock, 1, 2, foo='bar', cat='meow')
task._thread_release_resources = thr_release_mock
- task.release_resources = release_mock
+ task.release_resources = task_release_mock
self.assertRaises(exception.IronicException, _test_it)
spawn_mock.assert_called_once_with(1, 2, foo='bar', cat='meow')
thread_mock.link.assert_called_once_with(thr_release_mock)
thread_mock.cancel.assert_called_once_with()
- release_mock.assert_called_once_with()
+ task_release_mock.assert_called_once_with()
+
+@task_manager.require_exclusive_lock
+def _req_excl_lock_method(*args, **kwargs):
+ return (args, kwargs)
-class ExclusiveLockDecoratorTestCase(TaskManagerSetup):
+class ExclusiveLockDecoratorTestCase(tests_base.TestCase):
def setUp(self):
super(ExclusiveLockDecoratorTestCase, self).setUp()
- self.uuids = [create_fake_node(self.context, 123)]
-
- def test_require_exclusive_lock(self):
- @task_manager.require_exclusive_lock
- def do_state_change(task):
- for r in task.resources:
- task.dbapi.update_node(r.node.uuid,
- {'power_state': 'test-state'})
-
- with task_manager.acquire(self.context, self.uuids,
- shared=True) as task:
- self.assertRaises(exception.ExclusiveLockRequired,
- do_state_change,
- task)
-
- with task_manager.acquire(self.context, self.uuids,
- shared=False) as task:
- do_state_change(task)
-
- for uuid in self.uuids:
- res = objects.Node.get_by_uuid(self.context, uuid)
- self.assertEqual('test-state', res.power_state)
-
- @task_manager.require_exclusive_lock
- def _do_state_change(self, task):
- for r in task.resources:
- task.dbapi.update_node(r.node.uuid,
- {'power_state': 'test-state'})
-
- def test_require_exclusive_lock_on_object(self):
- with task_manager.acquire(self.context, self.uuids,
- shared=True) as task:
- self.assertRaises(exception.ExclusiveLockRequired,
- self._do_state_change,
- task)
-
- with task_manager.acquire(self.context, self.uuids,
- shared=False) as task:
- self._do_state_change(task)
-
- for uuid in self.uuids:
- res = objects.Node.get_by_uuid(self.context, uuid)
- self.assertEqual('test-state', res.power_state)
-
- def test_one_node_per_task_properties(self):
- with task_manager.acquire(self.context, self.uuids) as task:
- self.assertEqual(task.node, task.resources[0].node)
- self.assertEqual(task.driver, task.resources[0].driver)
- self.assertEqual(task.node_manager, task.resources[0])
-
- def test_one_node_per_task_properties_fail(self):
- self.uuids.append(create_fake_node(self.context, 456))
- with task_manager.acquire(self.context, self.uuids) as task:
- def get_node():
- return task.node
-
- def get_driver():
- return task.driver
-
- def get_node_manager():
- return task.node_manager
-
- self.assertRaises(AttributeError, get_node)
- self.assertRaises(AttributeError, get_driver)
- self.assertRaises(AttributeError, get_node_manager)
+ self.task = mock.Mock(spec=task_manager.TaskManager)
+ self.args_task_first = (self.task, 1, 2)
+ self.args_task_second = (1, self.task, 2)
+ self.kwargs = dict(cat='meow', dog='wuff')
+
+ def test_with_excl_lock_task_first_arg(self):
+ self.task.shared = False
+ (args, kwargs) = _req_excl_lock_method(*self.args_task_first,
+ **self.kwargs)
+ self.assertEqual(self.args_task_first, args)
+ self.assertEqual(self.kwargs, kwargs)
+
+ def test_with_excl_lock_task_second_arg(self):
+ self.task.shared = False
+ (args, kwargs) = _req_excl_lock_method(*self.args_task_second,
+ **self.kwargs)
+ self.assertEqual(self.args_task_second, args)
+ self.assertEqual(self.kwargs, kwargs)
+
+ def test_with_shared_lock_task_first_arg(self):
+ self.task.shared = True
+ self.assertRaises(exception.ExclusiveLockRequired,
+ _req_excl_lock_method,
+ *self.args_task_first,
+ **self.kwargs)
+
+ def test_with_shared_lock_task_second_arg(self):
+ self.task.shared = True
+ self.assertRaises(exception.ExclusiveLockRequired,
+ _req_excl_lock_method,
+ *self.args_task_second,
+ **self.kwargs)
class TaskManagerGreenThreadTestCase(tests_base.TestCase):
diff --git a/ironic/tests/db/test_nodes.py b/ironic/tests/db/test_nodes.py
index 239621c95..488569fec 100644
--- a/ironic/tests/db/test_nodes.py
+++ b/ironic/tests/db/test_nodes.py
@@ -40,36 +40,6 @@ class DbNodeTestCase(base.DbTestCase):
self.dbapi.create_node(n)
return n
- def _create_many_test_nodes(self):
- uuids = []
- for i in range(1, 6):
- n = self._create_test_node(id=i, uuid=ironic_utils.generate_uuid())
- uuids.append(n['uuid'])
- uuids.sort()
- return uuids
-
- def _create_associated_nodes(self):
- uuids = []
- uuids_with_instance = []
-
- for i in range(1, 5):
- uuid = ironic_utils.generate_uuid()
- uuids.append(six.text_type(uuid))
- if i < 3:
- instance_uuid = ironic_utils.generate_uuid()
- uuids_with_instance.append(six.text_type(uuid))
- else:
- instance_uuid = None
-
- n = utils.get_test_node(id=i,
- uuid=uuid,
- instance_uuid=instance_uuid)
- self.dbapi.create_node(n)
-
- uuids.sort()
- uuids_with_instance.sort()
- return (uuids, uuids_with_instance)
-
def test_create_node(self):
self._create_test_node()
@@ -353,14 +323,14 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.update_node(n['id'], {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
- def test_reserve_one_node(self):
+ def test_reserve_node(self):
n = self._create_test_node()
uuid = n['uuid']
r1 = 'fake-reservation'
# reserve the node
- self.dbapi.reserve_nodes(r1, [uuid])
+ self.dbapi.reserve_node(r1, uuid)
# check reservation
res = self.dbapi.get_node_by_uuid(uuid)
@@ -371,10 +341,10 @@ class DbNodeTestCase(base.DbTestCase):
uuid = n['uuid']
r1 = 'fake-reservation'
- self.dbapi.reserve_nodes(r1, [uuid])
+ self.dbapi.reserve_node(r1, uuid)
# release reservation
- self.dbapi.release_nodes(r1, [uuid])
+ self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
@@ -386,15 +356,15 @@ class DbNodeTestCase(base.DbTestCase):
r2 = 'another-reservation'
# reserve the node
- self.dbapi.reserve_nodes(r1, [uuid])
+ self.dbapi.reserve_node(r1, uuid)
# another host fails to reserve or release
self.assertRaises(exception.NodeLocked,
- self.dbapi.reserve_nodes,
- r2, [uuid])
+ self.dbapi.reserve_node,
+ r2, uuid)
self.assertRaises(exception.NodeLocked,
- self.dbapi.release_nodes,
- r2, [uuid])
+ self.dbapi.release_node,
+ r2, uuid)
def test_reservation_after_release(self):
n = self._create_test_node()
@@ -403,94 +373,48 @@ class DbNodeTestCase(base.DbTestCase):
r1 = 'fake-reservation'
r2 = 'another-reservation'
- self.dbapi.reserve_nodes(r1, [uuid])
- self.dbapi.release_nodes(r1, [uuid])
+ self.dbapi.reserve_node(r1, uuid)
+ self.dbapi.release_node(r1, uuid)
# another host succeeds
- self.dbapi.reserve_nodes(r2, [uuid])
+ self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
- def test_reserve_many_nodes(self):
- uuids = self._create_many_test_nodes()
- r1 = 'first-reservation'
-
- self.dbapi.reserve_nodes(r1, uuids)
-
- for uuid in uuids:
- res = self.dbapi.get_node_by_uuid(uuid)
- self.assertEqual(r1, res.reservation)
-
- def test_reserve_overlaping_ranges_fails(self):
- uuids = self._create_many_test_nodes()
-
- r1 = 'first-reservation'
- r2 = 'second-reservation'
-
- self.dbapi.reserve_nodes(r1, uuids[:3])
-
- self.assertRaises(exception.NodeLocked,
- self.dbapi.reserve_nodes,
- r2, uuids)
- self.assertRaises(exception.NodeLocked,
- self.dbapi.reserve_nodes,
- r2, uuids[2:])
-
- def test_reserve_non_overlaping_ranges(self):
- uuids = self._create_many_test_nodes()
-
- r1 = 'first-reservation'
- r2 = 'second-reservation'
-
- self.dbapi.reserve_nodes(r1, uuids[:3])
- self.dbapi.reserve_nodes(r2, uuids[3:])
-
- for i in range(0, len(uuids)):
- res = self.dbapi.get_node_by_uuid(uuids[i])
-
- reservation = r1 if i < 3 else r2
- self.assertEqual(reservation, res.reservation)
-
- def test_reserve_empty(self):
- self.assertRaises(exception.InvalidIdentity,
- self.dbapi.reserve_nodes, 'reserv1', [])
-
def test_reservation_in_exception_message(self):
n = self._create_test_node()
uuid = n['uuid']
r = 'fake-reservation'
- self.dbapi.reserve_nodes(r, [uuid])
+ self.dbapi.reserve_node(r, uuid)
try:
- self.dbapi.reserve_nodes('another', [uuid])
+ self.dbapi.reserve_node('another', uuid)
except exception.NodeLocked as e:
self.assertIn(r, str(e))
- def test_release_overlaping_ranges_fails(self):
- uuids = self._create_many_test_nodes()
-
- r1 = 'first-reservation'
- r2 = 'second-reservation'
-
- self.dbapi.reserve_nodes(r1, uuids[:3])
- self.dbapi.reserve_nodes(r2, uuids[3:])
-
- self.assertRaises(exception.NodeLocked,
- self.dbapi.release_nodes,
- r1, uuids)
+ def test_reservation_non_existent_node(self):
+ n = self._create_test_node()
+ self.dbapi.destroy_node(n['id'])
- def test_release_non_ranges(self):
- uuids = self._create_many_test_nodes()
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.reserve_node, 'fake', n['id'])
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.reserve_node, 'fake', n['uuid'])
- r1 = 'first-reservation'
- r2 = 'second-reservation'
+ def test_release_non_existent_node(self):
+ n = self._create_test_node()
+ self.dbapi.destroy_node(n['id'])
- self.dbapi.reserve_nodes(r1, uuids[:3])
- self.dbapi.reserve_nodes(r2, uuids[3:])
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.release_node, 'fake', n['id'])
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.release_node, 'fake', n['uuid'])
- self.dbapi.release_nodes(r1, uuids[:3])
- self.dbapi.release_nodes(r2, uuids[3:])
+ def test_release_non_locked_node(self):
+ n = self._create_test_node()
- for uuid in uuids:
- res = self.dbapi.get_node_by_uuid(uuid)
- self.assertIsNone(res.reservation)
+ self.assertEqual(None, n['reservation'])
+ self.assertRaises(exception.NodeNotLocked,
+ self.dbapi.release_node, 'fake', n['id'])
+ self.assertRaises(exception.NodeNotLocked,
+ self.dbapi.release_node, 'fake', n['uuid'])
diff --git a/ironic/tests/db/test_ports.py b/ironic/tests/db/test_ports.py
index b129091f4..01dd07dee 100644
--- a/ironic/tests/db/test_ports.py
+++ b/ironic/tests/db/test_ports.py
@@ -99,7 +99,7 @@ class DbPortTestCase(base.DbTestCase):
def test_destroy_port_on_reserved_node(self):
p = self.dbapi.create_port(db_utils.get_test_port(node_id=self.n.id))
uuid = self.n.uuid
- self.dbapi.reserve_nodes('fake-reservation', [uuid])
+ self.dbapi.reserve_node('fake-reservation', uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.destroy_port, p.id)
diff --git a/ironic/tests/drivers/test_ipminative.py b/ironic/tests/drivers/test_ipminative.py
index ff5ebe3ab..0ec04a236 100644
--- a/ironic/tests/drivers/test_ipminative.py
+++ b/ironic/tests/drivers/test_ipminative.py
@@ -172,7 +172,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
power_on_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.driver.power.set_power_state(
task, states.POWER_ON)
power_on_mock.assert_called_once_with(self.info)
@@ -182,7 +182,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
power_off_mock.return_value = states.POWER_OFF
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.driver.power.set_power_state(
task, states.POWER_OFF)
power_off_mock.assert_called_once_with(self.info)
@@ -194,7 +194,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
self.config(retry_timeout=500, group='ipmi')
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.set_power_state,
task,
@@ -207,12 +207,12 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
ipmicmd.set_bootdev.return_value = None
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.driver.vendor._set_boot_device(task, 'pxe')
ipmicmd.set_bootdev.assert_called_once_with('pxe')
def test_set_boot_device_bad_device(self):
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor._set_boot_device,
task,
@@ -223,7 +223,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
reboot_mock.return_value = None
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.driver.power.reboot(task)
reboot_mock.assert_called_once_with(self.info)
@@ -234,7 +234,7 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
self.config(retry_timeout=500, group='ipmi')
with task_manager.acquire(self.context,
- [self.node.uuid]) as task:
+ self.node.uuid) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.reboot,
task)
@@ -242,14 +242,14 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
def test_vendor_passthru_validate__set_boot_device_good(self):
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.vendor.validate(task,
method='set_boot_device',
device='pxe')
def test_vendor_passthru_val__set_boot_device_fail_unknown_device(self):
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='set_boot_device',
@@ -257,21 +257,21 @@ class IPMINativeDriverTestCase(db_base.DbTestCase):
def test_vendor_passthru_val__set_boot_device_fail_missed_device_arg(self):
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='set_boot_device')
def test_vendor_passthru_validate_method_notmatch(self):
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='non-existent-method')
@mock.patch.object(ipminative.VendorPassthru, '_set_boot_device')
def test_vendor_passthru_call__set_boot_device(self, boot_mock):
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.driver.vendor.vendor_passthru(task,
method='set_boot_device',
diff --git a/ironic/tests/drivers/test_ipmitool.py b/ironic/tests/drivers/test_ipmitool.py
index 7907313ed..211ae3c39 100644
--- a/ironic/tests/drivers/test_ipmitool.py
+++ b/ironic/tests/drivers/test_ipmitool.py
@@ -306,7 +306,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_on.return_value = states.POWER_ON
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.power.set_power_state(task,
states.POWER_ON)
@@ -321,7 +321,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_off.return_value = states.POWER_OFF
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.power.set_power_state(task,
states.POWER_OFF)
@@ -335,7 +335,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_on.return_value = states.ERROR
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.set_power_state,
task,
@@ -345,7 +345,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
self.assertFalse(mock_off.called)
def test_set_power_invalid_state(self):
- with task_manager.acquire(self.context, [self.node['uuid']]) as task:
+ with task_manager.acquire(self.context, self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.power.set_power_state,
task,
@@ -356,13 +356,13 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_exec.return_value = [None, None]
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.vendor._set_boot_device(task, 'pxe')
mock_exec.assert_called_once_with(self.info, "chassis bootdev pxe")
def test_set_boot_device_bad_device(self):
- with task_manager.acquire(self.context, [self.node['uuid']]) as task:
+ with task_manager.acquire(self.context, self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor._set_boot_device,
task,
@@ -380,7 +380,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock.call.power_on(self.info)]
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.power.reboot(task)
self.assertEqual(manager.mock_calls, expected)
@@ -397,7 +397,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock.call.power_on(self.info)]
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.reboot,
task)
@@ -431,7 +431,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
@mock.patch.object(ipmi.VendorPassthru, '_set_boot_device')
def test_vendor_passthru_call_set_boot_device(self, boot_mock):
- with task_manager.acquire(self.context, [self.node['uuid']],
+ with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.driver.vendor.vendor_passthru(task,
method='set_boot_device',
@@ -442,7 +442,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
def test_validate_ok(self, exec_mock):
exec_mock.return_value = ('System GUID: fake', '')
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
task.driver.power.validate(task, task.node)
exec_mock.assert_called_once_with(mock.ANY, "mc guid")
@@ -450,7 +450,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
def test_validate_fail(self, exec_mock):
exec_mock.side_effect = Exception
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate, task,
task.node)
@@ -462,7 +462,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_exec.return_value = None
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.console.start_console(task, self.node)
mock_exec.assert_called_once_with(self.info['uuid'],
@@ -477,7 +477,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
error='error')
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.assertRaises(exception.ConsoleSubprocessFailed,
self.driver.console.start_console,
task, self.node)
@@ -488,7 +488,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
mock_exec.return_value = None
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
self.driver.console.stop_console(task, self.node)
mock_exec.assert_called_once_with(self.info['uuid'])
@@ -502,7 +502,7 @@ class IPMIToolDriverTestCase(db_base.DbTestCase):
expected = {'type': 'shellinabox', 'url': url}
with task_manager.acquire(self.context,
- [self.node['uuid']]) as task:
+ self.node['uuid']) as task:
console_info = self.driver.console.get_console(task,
self.node)
diff --git a/ironic/tests/drivers/test_pxe.py b/ironic/tests/drivers/test_pxe.py
index e559cc5a0..288c72889 100644
--- a/ironic/tests/drivers/test_pxe.py
+++ b/ironic/tests/drivers/test_pxe.py
@@ -575,19 +575,19 @@ class PXEDriverTestCase(db_base.DbTestCase):
def test_validate_good(self, mock_glance):
mock_glance.return_value = {'properties': {'kernel_id': 'fake-kernel',
'ramdisk_id': 'fake-initr'}}
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.resources[0].driver.deploy.validate(task, self.node)
+ task.driver.deploy.validate(task, self.node)
def test_validate_fail(self):
info = dict(INFO_DICT)
del info['pxe_image_source']
self.node['driver_info'] = json.dumps(info)
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node['driver_info'] = json.dumps(info)
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.deploy.validate,
+ task.driver.deploy.validate,
task, task.node)
def test_validate_fail_no_port(self):
@@ -595,10 +595,10 @@ class PXEDriverTestCase(db_base.DbTestCase):
self.context,
id=321, uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
driver='fake_pxe', driver_info=INFO_DICT)
- with task_manager.acquire(self.context, [new_node.uuid],
+ with task_manager.acquire(self.context, new_node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.deploy.validate,
+ task.driver.deploy.validate,
task, new_node)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@@ -610,9 +610,9 @@ class PXEDriverTestCase(db_base.DbTestCase):
# not present in the keystone catalog
mock_ks.side_effect = exception.CatalogFailure
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.resources[0].driver.deploy.validate(task, self.node)
+ task.driver.deploy.validate(task, self.node)
self.assertFalse(mock_ks.called)
@mock.patch.object(base_image_service.BaseImageService, '_show')
@@ -625,9 +625,9 @@ class PXEDriverTestCase(db_base.DbTestCase):
# not present in the config file
self.config(group='conductor', api_url=None)
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.resources[0].driver.deploy.validate(task, self.node)
+ task.driver.deploy.validate(task, self.node)
mock_ks.assert_called_once_with()
@mock.patch.object(keystone, 'get_service_url')
@@ -637,10 +637,10 @@ class PXEDriverTestCase(db_base.DbTestCase):
# not present in the config file
self.config(group='conductor', api_url=None)
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.deploy.validate,
+ task.driver.deploy.validate,
task, self.node)
mock_ks.assert_called_once_with()
@@ -676,25 +676,25 @@ class PXEDriverTestCase(db_base.DbTestCase):
task, self.node)
def test_vendor_passthru_validate_good(self):
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.resources[0].driver.vendor.validate(task,
- method='pass_deploy_info', address='123456', iqn='aaa-bbb',
- key='fake-56789')
+ task.driver.vendor.validate(task, method='pass_deploy_info',
+ address='123456', iqn='aaa-bbb',
+ key='fake-56789')
def test_vendor_passthru_validate_fail(self):
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.validate,
+ task.driver.vendor.validate,
task, method='pass_deploy_info',
key='fake-56789')
def test_vendor_passthru_validate_key_notmatch(self):
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.validate,
+ task.driver.vendor.validate,
task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-12345')
@@ -814,9 +814,9 @@ class PXEDriverTestCase(db_base.DbTestCase):
fake_deploy))
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.resources[0].driver.vendor.vendor_passthru(task,
- method='pass_deploy_info', address='123456', iqn='aaa-bbb',
- key='fake-56789')
+ task.driver.vendor.vendor_passthru(
+ task, method='pass_deploy_info', address='123456',
+ iqn='aaa-bbb', key='fake-56789')
self.node.refresh(self.context)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
@@ -839,10 +839,10 @@ class PXEDriverTestCase(db_base.DbTestCase):
'ironic.drivers.modules.deploy_utils.deploy',
fake_deploy))
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
- task.resources[0].driver.vendor.vendor_passthru(task,
- method='pass_deploy_info', address='123456', iqn='aaa-bbb',
- key='fake-56789')
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.vendor.vendor_passthru(
+ task, method='pass_deploy_info', address='123456',
+ iqn='aaa-bbb', key='fake-56789')
self.node.refresh(self.context)
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.POWER_OFF, self.node.power_state)
@@ -865,10 +865,11 @@ class PXEDriverTestCase(db_base.DbTestCase):
'ironic.drivers.modules.deploy_utils.deploy',
fake_deploy))
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
- task.resources[0].driver.vendor.vendor_passthru(task,
- method='pass_deploy_info', address='123456', iqn='aaa-bbb',
- key='fake-56789', error='test ramdisk error')
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.vendor.vendor_passthru(
+ task, method='pass_deploy_info', address='123456',
+ iqn='aaa-bbb', key='fake-56789',
+ error='test ramdisk error')
self.node.refresh(self.context)
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.POWER_OFF, self.node.power_state)
@@ -882,16 +883,17 @@ class PXEDriverTestCase(db_base.DbTestCase):
self.node.provision_state = 'FAKE'
self.node.save()
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
- task.resources[0].driver.vendor.vendor_passthru(task,
- method='pass_deploy_info', address='123456', iqn='aaa-bbb',
- key='fake-56789', error='test ramdisk error')
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.vendor.vendor_passthru(
+ task, method='pass_deploy_info', address='123456',
+ iqn='aaa-bbb', key='fake-56789',
+ error='test ramdisk error')
self.node.refresh(self.context)
self.assertEqual('FAKE', self.node.provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)
def test_lock_elevated(self):
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(task.driver.vendor, '_continue_deploy') \
as _continue_deploy_mock:
task.driver.vendor.vendor_passthru(task,
@@ -969,9 +971,9 @@ class PXEDriverTestCase(db_base.DbTestCase):
token_path = self._create_token_file()
self.config(image_cache_size=0, group='pxe')
- with task_manager.acquire(self.context, [self.node.uuid],
+ with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.resources[0].driver.deploy.clean_up(task)
+ task.driver.deploy.clean_up(task)
get_tftp_image_info_mock.called_once_with(task.node)
assert_false_path = [config_path, deploy_kernel_path, image_path,
pxe_mac_path, image_dir, instance_dir,
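
For reference, a minimal sketch of the shared-lock pattern these PXE tests now exercise, assuming the single-node acquire() API that this change introduces; the helper name validate_deploy and the bare context argument are illustrative, not part of the patch:

    # Illustrative sketch, not part of this patch.
    from ironic.conductor import task_manager

    def validate_deploy(context, node_uuid):
        # A shared lock suffices for read-only checks such as validate().
        with task_manager.acquire(context, node_uuid, shared=True) as task:
            # task.driver replaces the old task.resources[0].driver lookup.
            task.driver.deploy.validate(task, task.node)
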
diff --git a/ironic/tests/drivers/test_seamicro.py b/ironic/tests/drivers/test_seamicro.py
index cc47bbaa8..400add9a4 100644
--- a/ironic/tests/drivers/test_seamicro.py
+++ b/ironic/tests/drivers/test_seamicro.py
@@ -280,20 +280,19 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
@mock.patch.object(seamicro, '_parse_driver_info')
def test_power_interface_validate_good(self, parse_drv_info_mock):
- with task_manager.acquire(self.context, [self.node['uuid']],
+ with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
- task.resources[0].driver.power.validate(
- task, self.node)
+ task.driver.power.validate(task, self.node)
self.assertEqual(1, parse_drv_info_mock.call_count)
@mock.patch.object(seamicro, '_parse_driver_info')
def test_power_interface_validate_fails(self, parse_drv_info_mock):
side_effect = exception.InvalidParameterValue("Bad input")
parse_drv_info_mock.side_effect = side_effect
- with task_manager.acquire(self.context, [self.node['uuid']],
+ with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.power.validate,
+ task.driver.power.validate,
task, self.node)
self.assertEqual(1, parse_drv_info_mock.call_count)
@@ -303,9 +302,9 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_reboot.return_value = states.POWER_ON
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.reboot(task)
+ task.driver.power.reboot(task)
mock_reboot.assert_called_once_with(task.node)
@@ -314,10 +313,10 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
self.get_server_mock = self.get_server_patcher.start()
self.get_server_mock.return_value = self.Server()
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.IronicException,
- task.resources[0].driver.power.set_power_state,
+ task.driver.power.set_power_state,
task, "BAD_PSTATE")
self.get_server_patcher.stop()
@@ -327,10 +326,9 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_power_on.return_value = states.POWER_ON
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.set_power_state(task,
- states.POWER_ON)
+ task.driver.power.set_power_state(task, states.POWER_ON)
mock_power_on.assert_called_once_with(task.node)
@@ -340,11 +338,10 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_power_on.return_value = states.POWER_OFF
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
- task.resources[0]
- .driver.power.set_power_state,
+ task.driver.power.set_power_state,
task, states.POWER_ON)
mock_power_on.assert_called_once_with(task.node)
@@ -355,10 +352,9 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_power_off.return_value = states.POWER_OFF
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.\
- set_power_state(task, states.POWER_OFF)
+ task.driver.power.set_power_state(task, states.POWER_OFF)
mock_power_off.assert_called_once_with(task.node)
@@ -368,27 +364,25 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_power_off.return_value = states.POWER_ON
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
- task.resources[0]
- .driver.power.set_power_state,
+ task.driver.power.set_power_state,
task, states.POWER_OFF)
mock_power_off.assert_called_once_with(task.node)
def test_vendor_passthru_validate_good(self):
- with task_manager.acquire(self.context, [self.node['uuid']],
+ with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
for method in seamicro.VENDOR_PASSTHRU_METHODS:
- task.resources[0].driver.vendor.validate(
- task, **{'method': method})
+ task.driver.vendor.validate(task, **{'method': method})
def test_vendor_passthru_validate_fail(self):
- with task_manager.acquire(self.context, [self.node['uuid']],
+ with task_manager.acquire(self.context, self.node['uuid'],
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.validate,
+ task.driver.vendor.validate,
task, **{'method': 'invalid_method'})
@mock.patch.object(seamicro, '_get_server')
@@ -396,20 +390,19 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
info = seamicro._parse_driver_info(self.node)
vlan_id = "12"
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'vlan_id': vlan_id, 'method': 'set_node_vlan_id'}
- task.resources[0].driver.vendor.\
- vendor_passthru(task, **kwargs)
+ task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
def test_set_node_vlan_id_no_input(self):
info = seamicro._parse_driver_info(self.node)
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.
- vendor_passthru, task,
+ task.driver.vendor.vendor_passthru,
+ task,
**{'method': 'set_node_vlan_id'})
@mock.patch.object(seamicro, '_get_server')
@@ -422,12 +415,13 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
server = self.Server(active="true")
server.set_untagged_vlan = fake_set_untagged_vlan
mock_get_server.return_value = server
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'vlan_id': vlan_id, 'method': 'set_node_vlan_id'}
self.assertRaises(exception.IronicException,
- task.resources[0].driver.vendor.
- vendor_passthru, task, **kwargs)
+ task.driver.vendor.vendor_passthru,
+ task,
+ **kwargs)
mock_get_server.assert_called_once_with(info)
@@ -439,11 +433,10 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
volume_id = '0/ironic-p6-1/vol1'
mock_validate_volume.return_value = True
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
- task.resources[0].driver.vendor.\
- vendor_passthru(task, **kwargs)
+ task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
@mock.patch.object(seamicro, '_get_server')
@@ -455,12 +448,13 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
volume_id = '0/p6-1/vol1'
mock_get_volume.return_value = self.Volume(volume_id)
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.
- vendor_passthru, task, **kwargs)
+ task.driver.vendor.vendor_passthru,
+ task,
+ **kwargs)
@mock.patch.object(seamicro, '_get_server')
@mock.patch.object(seamicro, '_validate_volume')
@@ -475,12 +469,13 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
server = self.Server(active="true")
server.attach_volume = fake_attach_volume
mock_get_server.return_value = server
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_id': volume_id, 'method': 'attach_volume'}
self.assertRaises(exception.IronicException,
- task.resources[0].driver.vendor.
- vendor_passthru, task, **kwargs)
+ task.driver.vendor.vendor_passthru,
+ task,
+ **kwargs)
mock_get_server.assert_called_once_with(info)
@@ -496,21 +491,19 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
mock_create_volume.return_value = volume_id
mock_validate_volume.return_value = True
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'volume_size': volume_size, 'method': "attach_volume"}
- task.resources[0].driver.vendor.\
- vendor_passthru(task, **kwargs)
+ task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
mock_create_volume.assert_called_once_with(info, volume_size)
def test_attach_volume_with_no_input_fail(self):
info = seamicro._parse_driver_info(self.node)
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.
- vendor_passthru, task,
+ task.driver.vendor.vendor_passthru, task,
**{'method': 'attach_volume'})
@mock.patch.object(seamicro, '_get_server')
@@ -518,20 +511,18 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
info = seamicro._parse_driver_info(self.node)
boot_device = "disk"
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'device': boot_device, 'method': 'set_boot_device'}
- task.resources[0].driver.vendor.\
- vendor_passthru(task, **kwargs)
+ task.driver.vendor.vendor_passthru(task, **kwargs)
mock_get_server.assert_called_once_with(info)
def test_set_boot_device_no_input(self):
info = seamicro._parse_driver_info(self.node)
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.
- vendor_passthru, task,
+ task.driver.vendor.vendor_passthru, task,
**{'method': 'set_boot_device'})
@mock.patch.object(seamicro, '_get_server')
@@ -539,12 +530,13 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
info = seamicro._parse_driver_info(self.node)
boot_device = "invalid_device"
mock_get_server.return_value = self.Server(active="true")
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'device': boot_device, 'method': 'set_boot_device'}
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.vendor.
- vendor_passthru, task, **kwargs)
+ task.driver.vendor.vendor_passthru,
+ task,
+ **kwargs)
@mock.patch.object(seamicro, '_get_server')
def test_set_boot_device_fail(self, mock_get_server):
@@ -556,11 +548,12 @@ class SeaMicroPowerDriverTestCase(db_base.DbTestCase):
server = self.Server(active="true")
server.set_boot_order = fake_set_boot_order
mock_get_server.return_value = server
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
kwargs = {'device': boot_device, 'method': 'set_boot_device'}
self.assertRaises(exception.IronicException,
- task.resources[0].driver.vendor.
- vendor_passthru, task, **kwargs)
+ task.driver.vendor.vendor_passthru,
+ task,
+ **kwargs)
mock_get_server.assert_called_once_with(info)
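
The SeaMicro vendor_passthru calls follow the same shape; a sketch with an assumed helper name attach_volume, holding an exclusive lock (shared=False) as the tests above do:

    # Illustrative sketch, not part of this patch.
    from ironic.conductor import task_manager

    def attach_volume(context, node_uuid, volume_id):
        # Exclusive lock, since vendor_passthru changes node state.
        with task_manager.acquire(context, node_uuid, shared=False) as task:
            task.driver.vendor.vendor_passthru(task, method='attach_volume',
                                               volume_id=volume_id)
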
diff --git a/ironic/tests/drivers/test_ssh.py b/ironic/tests/drivers/test_ssh.py
index 1a6593729..2e2e66ec9 100644
--- a/ironic/tests/drivers/test_ssh.py
+++ b/ironic/tests/drivers/test_ssh.py
@@ -573,10 +573,10 @@ class SSHDriverTestCase(db_base.DbTestCase):
info = ssh._parse_driver_info(self.node)
ssh_connect_mock.side_effect = exception.SSHConnectFailed(host='fake')
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.power.validate,
+ task.driver.power.validate,
task, self.node)
driver_info = ssh._parse_driver_info(self.node)
ssh_connect_mock.assert_called_once_with(driver_info)
@@ -588,10 +588,10 @@ class SSHDriverTestCase(db_base.DbTestCase):
uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
driver='fake_ssh',
driver_info=db_utils.get_test_ssh_info())
- with task_manager.acquire(self.context, [new_node.uuid],
+ with task_manager.acquire(self.context, new_node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
- task.resources[0].driver.power.validate,
+ task.driver.power.validate,
task, new_node)
@mock.patch.object(driver_utils, 'get_node_mac_addresses')
@@ -612,9 +612,9 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.reboot(task)
+ task.driver.power.reboot(task)
parse_drv_info_mock.assert_called_once_with(task.node)
get_mac_addr_mock.assert_called_once_with(mock.ANY)
@@ -642,11 +642,10 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(exception.PowerStateFailure,
- task.resources[0].driver.power.reboot,
- task)
+ task.driver.power.reboot, task)
parse_drv_info_mock.assert_called_once_with(task.node)
get_mac_addr_mock.assert_called_once_with(mock.ANY)
get_conn_mock.assert_called_once_with(task.node)
@@ -666,11 +665,11 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(
exception.InvalidParameterValue,
- task.resources[0].driver.power.set_power_state,
+ task.driver.power.set_power_state,
task,
"BAD_PSTATE")
@@ -691,10 +690,9 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.set_power_state(task,
- states.POWER_ON)
+ task.driver.power.set_power_state(task, states.POWER_ON)
parse_drv_info_mock.assert_called_once_with(task.node)
get_mac_addr_mock.assert_called_once_with(mock.ANY)
@@ -714,11 +712,11 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(
exception.PowerStateFailure,
- task.resources[0].driver.power.set_power_state,
+ task.driver.power.set_power_state,
task,
states.POWER_ON)
@@ -740,10 +738,9 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
- task.resources[0].driver.power.set_power_state(
- task, states.POWER_OFF)
+ task.driver.power.set_power_state(task, states.POWER_OFF)
parse_drv_info_mock.assert_called_once_with(task.node)
get_mac_addr_mock.assert_called_once_with(mock.ANY)
@@ -763,11 +760,11 @@ class SSHDriverTestCase(db_base.DbTestCase):
with mock.patch.object(ssh,
'_parse_driver_info') as parse_drv_info_mock:
parse_drv_info_mock.return_value = info
- with task_manager.acquire(self.context, [info['uuid']],
+ with task_manager.acquire(self.context, info['uuid'],
shared=False) as task:
self.assertRaises(
exception.PowerStateFailure,
- task.resources[0].driver.power.set_power_state,
+ task.driver.power.set_power_state,
task,
states.POWER_OFF)
diff --git a/ironic/tests/drivers/test_utils.py b/ironic/tests/drivers/test_utils.py
index c4308e068..799d3ca09 100644
--- a/ironic/tests/drivers/test_utils.py
+++ b/ironic/tests/drivers/test_utils.py
@@ -121,6 +121,6 @@ class UtilsTestCase(base.TestCase):
address='dd:ee:ff',
uuid='4fc26c0b-03f2-4d2e-ae87-c02d7f33c234',
node_id=self.node.id)))
- with task_manager.acquire(self.context, [self.node.uuid]) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
node_macs = driver_utils.get_node_mac_addresses(task)
self.assertEqual(sorted([p.address for p in ports]), sorted(node_macs))
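
Across all of these test files the change reduces to one convention: acquire() takes a single node UUID rather than a list, and the task exposes that node's driver directly. A sketch of the exclusive-lock form, with the helper name power_on assumed:

    # Illustrative sketch, not part of this patch.
    from ironic.common import states
    from ironic.conductor import task_manager

    def power_on(context, node_uuid):
        # acquire() now locks exactly one node for the lifetime of the task.
        with task_manager.acquire(context, node_uuid, shared=False) as task:
            # No more task.resources[0]; the task wraps a single node.
            task.driver.power.set_power_state(task, states.POWER_ON)
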