 api-ref/source/baremetal-api-v1-nodes.inc | 5
 api-ref/source/parameters.yaml | 14
 api-ref/source/samples/node-create-response.json | 2
 api-ref/source/samples/node-show-response.json | 2
 api-ref/source/samples/node-update-driver-info-response.json | 2
 api-ref/source/samples/nodes-list-details-response.json | 4
 doc/source/_exts/automated_steps.py | 2
 doc/source/contributor/webapi-version-history.rst | 9
 doc/source/install/configure-compute.rst | 4
 ironic/api/controllers/v1/node.py | 43
 ironic/api/controllers/v1/port.py | 62
 ironic/api/controllers/v1/utils.py | 51
 ironic/api/controllers/v1/versions.py | 3
 ironic/common/exception.py | 5
 ironic/common/policy.py | 16
 ironic/common/release_mappings.py | 4
 ironic/conductor/cleaning.py | 276
 ironic/conductor/deployments.py | 402
 ironic/conductor/manager.py | 730
 ironic/conductor/utils.py | 62
 ironic/db/api.py | 1
 ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py | 33
 ironic/db/sqlalchemy/api.py | 32
 ironic/db/sqlalchemy/models.py | 3
 ironic/drivers/modules/deploy_utils.py | 21
 ironic/objects/node.py | 32
 ironic/objects/port.py | 24
 ironic/tests/unit/api/controllers/v1/test_deploy_template.py | 7
 ironic/tests/unit/api/controllers/v1/test_expose.py | 2
 ironic/tests/unit/api/controllers/v1/test_node.py | 101
 ironic/tests/unit/api/controllers/v1/test_port.py | 203
 ironic/tests/unit/api/controllers/v1/test_utils.py | 157
 ironic/tests/unit/conductor/test_cleaning.py | 975
 ironic/tests/unit/conductor/test_deployments.py | 1017
 ironic/tests/unit/conductor/test_manager.py | 2132
 ironic/tests/unit/conductor/test_utils.py | 103
 ironic/tests/unit/db/sqlalchemy/test_migrations.py | 21
 ironic/tests/unit/db/test_ports.py | 49
 ironic/tests/unit/db/utils.py | 3
 ironic/tests/unit/drivers/modules/test_deploy_utils.py | 25
 ironic/tests/unit/objects/test_node.py | 58
 ironic/tests/unit/objects/test_objects.py | 12
 ironic/tests/unit/objects/test_port.py | 2
 releasenotes/notes/add_retirement_support-23c5fed7ce8f97d4.yaml | 9
 releasenotes/notes/fixes-get-boot-option-for-software-raid-baa2cffd95e1f624.yaml | 6
 releasenotes/notes/node-owner-policy-ports-1d3193fd897feaa6.yaml | 8
 46 files changed, 3901 insertions(+), 2833 deletions(-)
diff --git a/api-ref/source/baremetal-api-v1-nodes.inc b/api-ref/source/baremetal-api-v1-nodes.inc
index 570759f30..78e8a0f1c 100644
--- a/api-ref/source/baremetal-api-v1-nodes.inc
+++ b/api-ref/source/baremetal-api-v1-nodes.inc
@@ -426,6 +426,8 @@ Response
- description: n_description
- conductor: conductor
- allocation_uuid: allocation_uuid
+ - retired: retired
+ - retired_reason: retired_reason
**Example detailed list of Nodes:**
@@ -469,6 +471,9 @@ only the specified set.
.. versionadded:: 1.52
Introduced the ``allocation_uuid`` field.
+.. versionadded:: 1.61
+ Introduced the ``retired`` and ``retired_reason`` fields.
+
Normal response codes: 200
Error codes: 400,403,404,406
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 66e066c52..4967fc61d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -1542,6 +1542,20 @@ response_driver_type:
in: body
required: true
type: string
+retired:
+  description: |
+    Whether the node is retired and can hence no longer be provisioned, i.e.
+    it cannot move from ``manageable`` to ``available``, and will end up in
+    ``manageable`` after cleaning (rather than ``available``).
+  in: body
+  required: false
+  type: boolean
+retired_reason:
+  description: |
+    The reason the node is marked as retired.
+  in: body
+  required: false
+  type: string
standalone_ports_supported:
description: |
Indicates whether ports that are members of this portgroup can be
diff --git a/api-ref/source/samples/node-create-response.json b/api-ref/source/samples/node-create-response.json
index 08692249a..84932a235 100644
--- a/api-ref/source/samples/node-create-response.json
+++ b/api-ref/source/samples/node-create-response.json
@@ -71,6 +71,8 @@
"rescue_interface": null,
"reservation": null,
"resource_class": "bm-large",
+ "retired": false,
+ "retired_reason": null,
"states": [
{
"href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states",
diff --git a/api-ref/source/samples/node-show-response.json b/api-ref/source/samples/node-show-response.json
index 890a311ed..3a520c96b 100644
--- a/api-ref/source/samples/node-show-response.json
+++ b/api-ref/source/samples/node-show-response.json
@@ -74,6 +74,8 @@
"rescue_interface": null,
"reservation": null,
"resource_class": "bm-large",
+ "retired": false,
+ "retired_reason": null,
"states": [
{
"href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states",
diff --git a/api-ref/source/samples/node-update-driver-info-response.json b/api-ref/source/samples/node-update-driver-info-response.json
index a3155aa30..05665a3dc 100644
--- a/api-ref/source/samples/node-update-driver-info-response.json
+++ b/api-ref/source/samples/node-update-driver-info-response.json
@@ -75,6 +75,8 @@
"rescue_interface": null,
"reservation": null,
"resource_class": null,
+ "retired": false,
+ "retired_reason": null,
"states": [
{
"href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states",
diff --git a/api-ref/source/samples/nodes-list-details-response.json b/api-ref/source/samples/nodes-list-details-response.json
index 701ac82e7..870a62558 100644
--- a/api-ref/source/samples/nodes-list-details-response.json
+++ b/api-ref/source/samples/nodes-list-details-response.json
@@ -76,6 +76,8 @@
"rescue_interface": null,
"reservation": null,
"resource_class": null,
+ "retired": false,
+ "retired_reason": null,
"states": [
{
"href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states",
@@ -178,6 +180,8 @@
"rescue_interface": "no-rescue",
"reservation": null,
"resource_class": null,
+ "retired": false,
+ "retired_reason": null,
"states": [
{
"href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/states",
diff --git a/doc/source/_exts/automated_steps.py b/doc/source/_exts/automated_steps.py
index e61d356bd..db242487f 100644
--- a/doc/source/_exts/automated_steps.py
+++ b/doc/source/_exts/automated_steps.py
@@ -174,7 +174,7 @@ class AutomatedStepsDirective(rst.Directive):
)
# NOTE(dhellmann): Useful for debugging.
- print('\n'.join(result))
+ # print('\n'.join(result))
node = nodes.section()
node.document = self.state.document
diff --git a/doc/source/contributor/webapi-version-history.rst b/doc/source/contributor/webapi-version-history.rst
index d2dcb7386..0f3c68830 100644
--- a/doc/source/contributor/webapi-version-history.rst
+++ b/doc/source/contributor/webapi-version-history.rst
@@ -2,6 +2,15 @@
REST API Version History
========================
+1.61 (Ussuri, master)
+---------------------
+
+Added the ``retired`` field to the node object to mark nodes for retirement.
+If set, this flag causes nodes to move to ``manageable`` instead of
+``available`` after automated cleaning; ``manageable`` nodes which have this
+flag set cannot be moved to ``available``. Also added ``retired_reason`` to
+specify the retirement reason.
+
1.60 (Ussuri, master)
---------------------
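
For illustration (not part of this patch), a minimal client-side sketch of marking a node as retired through the new 1.61 microversion; the endpoint, token and node UUID below are placeholders:

    # Illustrative sketch: set the new ``retired``/``retired_reason`` fields on
    # a node with a JSON PATCH request. Endpoint, token and UUID are placeholders.
    import json
    import requests

    IRONIC_URL = 'http://127.0.0.1:6385'
    HEADERS = {
        'X-Auth-Token': '<keystone-token>',
        'X-OpenStack-Ironic-API-Version': '1.61',
        'Content-Type': 'application/json',
    }
    patch = [
        {'op': 'add', 'path': '/retired', 'value': True},
        {'op': 'add', 'path': '/retired_reason', 'value': 'hardware out of warranty'},
    ]
    resp = requests.patch('%s/v1/nodes/<node-uuid>' % IRONIC_URL,
                          headers=HEADERS, data=json.dumps(patch))
    resp.raise_for_status()
    print(resp.json()['retired'], resp.json()['retired_reason'])
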
diff --git a/doc/source/install/configure-compute.rst b/doc/source/install/configure-compute.rst
index e0adee957..631cb0b6e 100644
--- a/doc/source/install/configure-compute.rst
+++ b/doc/source/install/configure-compute.rst
@@ -32,10 +32,6 @@ service's controller nodes and compute nodes.
# Enable the ironic virt driver for this compute instance.
compute_driver=ironic.IronicDriver
- # Firewall driver to use with nova-network service.
- # Ironic supports only neutron, so set this to noop.
- firewall_driver=nova.virt.firewall.NoopFirewallDriver
-
# Amount of memory in MB to reserve for the host so that it is always
# available to host processes.
# It is impossible to reserve any memory on bare metal nodes, so set
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index c322d3e20..6f4814788 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -1075,6 +1075,12 @@ class Node(base.APIBase):
allocation_uuid = wsme.wsattr(types.uuid, readonly=True)
"""The UUID of the allocation this node belongs"""
+ retired = types.boolean
+ """Indicates whether the node is marked for retirement."""
+
+ retired_reason = wsme.wsattr(str)
+ """Indicates the reason for a node's retirement."""
+
# NOTE(deva): "conductor_affinity" shouldn't be presented on the
# API because it's an internal value. Don't add it here.
@@ -1291,7 +1297,8 @@ class Node(base.APIBase):
bios_interface=None, conductor_group="",
automated_clean=None, protected=False,
protected_reason=None, owner=None,
- allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5')
+ allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5',
+ retired=False, retired_reason=None)
# NOTE(matty_dubs): The chassis_uuid getter() is based on the
# _chassis_uuid variable:
sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12'
@@ -1605,8 +1612,8 @@ class NodesController(rest.RestController):
return filtered_nodes
def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated,
- maintenance, provision_state, marker, limit,
- sort_key, sort_dir, driver=None,
+ maintenance, retired, provision_state, marker,
+ limit, sort_key, sort_dir, driver=None,
resource_class=None, resource_url=None,
fields=None, fault=None, conductor_group=None,
detail=None, conductor=None, owner=None,
@@ -1654,6 +1661,7 @@ class NodesController(rest.RestController):
'conductor_group': conductor_group,
'owner': owner,
'description_contains': description_contains,
+ 'retired': retired,
}
filters = {}
for key, value in possible_filters.items():
@@ -1673,6 +1681,8 @@ class NodesController(rest.RestController):
parameters['associated'] = associated
if maintenance:
parameters['maintenance'] = maintenance
+ if retired:
+ parameters['retired'] = retired
if detail is not None:
parameters['detail'] = detail
@@ -1773,14 +1783,14 @@ class NodesController(rest.RestController):
@METRICS.timer('NodesController.get_all')
@expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
- types.boolean, str, types.uuid, int, str,
+ types.boolean, types.boolean, str, types.uuid, int, str,
str, str, types.listtype, str,
str, str, types.boolean, str,
str, str)
def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
- maintenance=None, provision_state=None, marker=None,
- limit=None, sort_key='id', sort_dir='asc', driver=None,
- fields=None, resource_class=None, fault=None,
+ maintenance=None, retired=None, provision_state=None,
+ marker=None, limit=None, sort_key='id', sort_dir='asc',
+ driver=None, fields=None, resource_class=None, fault=None,
conductor_group=None, detail=None, conductor=None,
owner=None, description_contains=None):
"""Retrieve a list of nodes.
@@ -1795,6 +1805,8 @@ class NodesController(rest.RestController):
:param maintenance: Optional boolean value that indicates whether
to get nodes in maintenance mode ("True"), or not
in maintenance mode ("False").
+ :param retired: Optional boolean value that indicates whether
+ to get retired nodes.
:param provision_state: Optional string value to get only nodes in
that provision state.
:param marker: pagination marker for large data sets.
@@ -1839,7 +1851,7 @@ class NodesController(rest.RestController):
extra_args = {'description_contains': description_contains}
return self._get_nodes_collection(chassis_uuid, instance_uuid,
- associated, maintenance,
+ associated, maintenance, retired,
provision_state, marker,
limit, sort_key, sort_dir,
driver=driver,
@@ -1853,14 +1865,15 @@ class NodesController(rest.RestController):
@METRICS.timer('NodesController.detail')
@expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
- types.boolean, str, types.uuid, int, str,
+ types.boolean, types.boolean, str, types.uuid, int, str,
str, str, str, str,
str, str, str, str)
def detail(self, chassis_uuid=None, instance_uuid=None, associated=None,
- maintenance=None, provision_state=None, marker=None,
- limit=None, sort_key='id', sort_dir='asc', driver=None,
- resource_class=None, fault=None, conductor_group=None,
- conductor=None, owner=None, description_contains=None):
+ maintenance=None, retired=None, provision_state=None,
+ marker=None, limit=None, sort_key='id', sort_dir='asc',
+ driver=None, resource_class=None, fault=None,
+ conductor_group=None, conductor=None, owner=None,
+ description_contains=None):
"""Retrieve a list of nodes with detail.
:param chassis_uuid: Optional UUID of a chassis, to get only nodes for
@@ -1873,6 +1886,8 @@ class NodesController(rest.RestController):
:param maintenance: Optional boolean value that indicates whether
to get nodes in maintenance mode ("True"), or not
in maintenance mode ("False").
+ :param retired: Optional boolean value that indicates whether
+ to get nodes which are retired.
:param provision_state: Optional string value to get only nodes in
that provision state.
:param marker: pagination marker for large data sets.
@@ -1914,7 +1929,7 @@ class NodesController(rest.RestController):
resource_url = '/'.join(['nodes', 'detail'])
extra_args = {'description_contains': description_contains}
return self._get_nodes_collection(chassis_uuid, instance_uuid,
- associated, maintenance,
+ associated, maintenance, retired,
provision_state, marker,
limit, sort_key, sort_dir,
driver=driver,
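
As a usage sketch (not part of this patch), the new ``retired`` filter accepted by ``get_all`` and ``detail`` above can be exercised as follows; endpoint and token are placeholders:

    # Illustrative sketch: list only retired nodes via the new ``retired``
    # query filter. Endpoint and token are placeholders.
    import requests

    resp = requests.get('http://127.0.0.1:6385/v1/nodes',
                        params={'retired': 'True'},
                        headers={'X-Auth-Token': '<keystone-token>',
                                 'X-OpenStack-Ironic-API-Version': '1.61'})
    resp.raise_for_status()
    for node in resp.json()['nodes']:
        print(node['uuid'], node['provision_state'])
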
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index 9e551894f..6a07d7a4f 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -339,7 +339,8 @@ class PortsController(rest.RestController):
def _get_ports_collection(self, node_ident, address, portgroup_ident,
marker, limit, sort_key, sort_dir,
- resource_url=None, fields=None, detail=None):
+ resource_url=None, fields=None, detail=None,
+ owner=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -370,7 +371,8 @@ class PortsController(rest.RestController):
portgroup.id, limit,
marker_obj,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ owner=owner)
elif node_ident:
# FIXME(comstud): Since all we need is the node ID, we can
# make this more efficient by only querying
@@ -380,13 +382,14 @@ class PortsController(rest.RestController):
ports = objects.Port.list_by_node_id(api.request.context,
node.id, limit, marker_obj,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ owner=owner)
elif address:
- ports = self._get_ports_by_address(address)
+ ports = self._get_ports_by_address(address, owner=owner)
else:
ports = objects.Port.list(api.request.context, limit,
marker_obj, sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir, owner=owner)
parameters = {}
if detail is not None:
@@ -399,7 +402,7 @@ class PortsController(rest.RestController):
sort_dir=sort_dir,
**parameters)
- def _get_ports_by_address(self, address):
+ def _get_ports_by_address(self, address, owner=None):
"""Retrieve a port by its address.
:param address: MAC address of a port, to get the port which has
@@ -408,7 +411,8 @@ class PortsController(rest.RestController):
"""
try:
- port = objects.Port.get_by_address(api.request.context, address)
+ port = objects.Port.get_by_address(api.request.context, address,
+ owner=owner)
return [port]
except exception.PortNotFound:
return []
@@ -469,8 +473,7 @@ class PortsController(rest.RestController):
for that portgroup.
:raises: NotAcceptable, HTTPNotFound
"""
- cdict = api.request.context.to_policy_values()
- policy.authorize('baremetal:port:get', cdict, cdict)
+ owner = api_utils.check_port_list_policy()
api_utils.check_allow_specify_fields(fields)
self._check_allowed_port_fields(fields)
@@ -493,7 +496,7 @@ class PortsController(rest.RestController):
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
sort_dir, fields=fields,
- detail=detail)
+ detail=detail, owner=owner)
@METRICS.timer('PortsController.detail')
@expose.expose(PortCollection, types.uuid_or_name, types.uuid,
@@ -523,8 +526,7 @@ class PortsController(rest.RestController):
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:raises: NotAcceptable, HTTPNotFound
"""
- cdict = api.request.context.to_policy_values()
- policy.authorize('baremetal:port:get', cdict, cdict)
+ owner = api_utils.check_port_list_policy()
self._check_allowed_port_fields([sort_key])
if portgroup and not api_utils.allow_portgroups_subcontrollers():
@@ -546,7 +548,7 @@ class PortsController(rest.RestController):
resource_url = '/'.join(['ports', 'detail'])
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
- sort_dir, resource_url)
+ sort_dir, resource_url, owner=owner)
@METRICS.timer('PortsController.get_one')
@expose.expose(Port, types.uuid, types.listtype)
@@ -558,16 +560,15 @@ class PortsController(rest.RestController):
of the resource to be returned.
:raises: NotAcceptable, HTTPNotFound
"""
- cdict = api.request.context.to_policy_values()
- policy.authorize('baremetal:port:get', cdict, cdict)
-
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
+ rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:port:get', port_uuid)
+
api_utils.check_allow_specify_fields(fields)
self._check_allowed_port_fields(fields)
- rpc_port = objects.Port.get_by_uuid(api.request.context, port_uuid)
return Port.convert_with_links(rpc_port, fields=fields)
@METRICS.timer('PortsController.post')
@@ -578,13 +579,13 @@ class PortsController(rest.RestController):
:param port: a port within the request body.
:raises: NotAcceptable, HTTPNotFound, Conflict
"""
+ if self.parent_node_ident or self.parent_portgroup_ident:
+ raise exception.OperationNotPermitted()
+
context = api.request.context
cdict = context.to_policy_values()
policy.authorize('baremetal:port:create', cdict, cdict)
- if self.parent_node_ident or self.parent_portgroup_ident:
- raise exception.OperationNotPermitted()
-
pdict = port.as_dict()
self._check_allowed_port_fields(pdict)
@@ -660,13 +661,14 @@ class PortsController(rest.RestController):
:param patch: a json PATCH document to apply to this port.
:raises: NotAcceptable, HTTPNotFound
"""
- context = api.request.context
- cdict = context.to_policy_values()
- policy.authorize('baremetal:port:update', cdict, cdict)
-
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
+ rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:port:update', port_uuid)
+
+ context = api.request.context
+
fields_to_check = set()
for field in (self.advanced_net_fields
+ ['portgroup_uuid', 'physical_network',
@@ -677,7 +679,6 @@ class PortsController(rest.RestController):
fields_to_check.add(field)
self._check_allowed_port_fields(fields_to_check)
- rpc_port = objects.Port.get_by_uuid(context, port_uuid)
port_dict = rpc_port.as_dict()
# NOTE(lucasagomes):
# 1) Remove node_id because it's an internal value and
@@ -708,7 +709,6 @@ class PortsController(rest.RestController):
if rpc_port[field] != patch_val:
rpc_port[field] = patch_val
- rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
if (rpc_node.provision_state == ir_states.INSPECTING
and api_utils.allow_inspect_wait_state()):
msg = _('Cannot update port "%(port)s" on "%(node)s" while it is '
@@ -742,15 +742,13 @@ class PortsController(rest.RestController):
:param port_uuid: UUID of a port.
:raises: OperationNotPermitted, HTTPNotFound
"""
- context = api.request.context
- cdict = context.to_policy_values()
- policy.authorize('baremetal:port:delete', cdict, cdict)
-
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
- rpc_port = objects.Port.get_by_uuid(context, port_uuid)
- rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
+ rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:port:delete', port_uuid)
+
+ context = api.request.context
portgroup_uuid = None
if rpc_port.portgroup_id:
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 7712fc30f..28b6c9174 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -41,7 +41,8 @@ from ironic import objects
CONF = cfg.CONF
-_JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException,
+_JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchConflict,
+ jsonpatch.JsonPatchException,
jsonpatch.JsonPointerException,
KeyError,
IndexError)
@@ -487,6 +488,8 @@ VERSIONED_FIELDS = {
'description': versions.MINOR_51_NODE_DESCRIPTION,
'allocation_uuid': versions.MINOR_52_ALLOCATION,
'events': versions.MINOR_54_EVENTS,
+ 'retired': versions.MINOR_61_NODE_RETIRED,
+ 'retired_reason': versions.MINOR_61_NODE_RETIRED,
}
for field in V31_FIELDS:
@@ -1228,6 +1231,52 @@ def check_node_list_policy(owner=None):
return owner
+def check_port_policy_and_retrieve(policy_name, port_uuid):
+ """Check if the specified policy authorizes this request on a port.
+
+ :param: policy_name: Name of the policy to check.
+ :param: port_uuid: the UUID of a port.
+
+ :raises: HTTPForbidden if the policy forbids access.
+ :raises: NodeNotFound if the node is not found.
+ :return: RPC port identified by port_uuid and associated node
+ """
+ context = api.request.context
+ cdict = context.to_policy_values()
+
+ try:
+ rpc_port = objects.Port.get_by_uuid(context, port_uuid)
+ except exception.PortNotFound:
+ # don't expose non-existence of port unless requester
+ # has generic access to policy
+ policy.authorize(policy_name, cdict, cdict)
+ raise
+
+ rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
+ target_dict = dict(cdict)
+ target_dict['node.owner'] = rpc_node['owner']
+ policy.authorize(policy_name, target_dict, cdict)
+
+ return rpc_port, rpc_node
+
+
+def check_port_list_policy():
+ """Check if the specified policy authorizes this request on a port.
+
+ :raises: HTTPForbidden if the policy forbids access.
+ :return: owner that should be used for list query, if needed
+ """
+ cdict = api.request.context.to_policy_values()
+ try:
+ policy.authorize('baremetal:port:list_all', cdict, cdict)
+ except exception.HTTPForbidden:
+ owner = cdict.get('project_id')
+ if not owner:
+ raise
+ policy.authorize('baremetal:port:list', cdict, cdict)
+ return owner
+
+
def allow_build_configdrive():
"""Check if building configdrive is allowed.
diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py
index 19a3ac4a1..99e04b60e 100644
--- a/ironic/api/controllers/v1/versions.py
+++ b/ironic/api/controllers/v1/versions.py
@@ -160,6 +160,7 @@ MINOR_57_ALLOCATION_UPDATE = 57
MINOR_58_ALLOCATION_BACKFILL = 58
MINOR_59_CONFIGDRIVE_VENDOR_DATA = 59
MINOR_60_ALLOCATION_OWNER = 60
+MINOR_61_NODE_RETIRED = 61
# When adding another version, update:
# - MINOR_MAX_VERSION
@@ -167,7 +168,7 @@ MINOR_60_ALLOCATION_OWNER = 60
# explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']
-MINOR_MAX_VERSION = MINOR_60_ALLOCATION_OWNER
+MINOR_MAX_VERSION = MINOR_61_NODE_RETIRED
# String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index 1daee6318..9c949df4d 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -710,3 +710,8 @@ class IBMCConnectionError(IBMCError):
class ClientSideError(wsme.exc.ClientSideError):
pass
+
+
+class NodeIsRetired(Invalid):
+ _msg_fmt = _("The %(op)s operation can't be performed on node "
+ "%(node)s because it is retired.")
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index 9e019ccc0..51bd3b3dc 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -231,15 +231,25 @@ port_policies = [
'baremetal:port:get',
'rule:is_admin or rule:is_observer',
'Retrieve Port records',
- [{'path': '/ports', 'method': 'GET'},
- {'path': '/ports/detail', 'method': 'GET'},
- {'path': '/ports/{port_id}', 'method': 'GET'},
+ [{'path': '/ports/{port_id}', 'method': 'GET'},
{'path': '/nodes/{node_ident}/ports', 'method': 'GET'},
{'path': '/nodes/{node_ident}/ports/detail', 'method': 'GET'},
{'path': '/portgroups/{portgroup_ident}/ports', 'method': 'GET'},
{'path': '/portgroups/{portgroup_ident}/ports/detail',
'method': 'GET'}]),
policy.DocumentedRuleDefault(
+ 'baremetal:port:list',
+ 'rule:baremetal:port:get',
+ 'Retrieve multiple Port records, filtered by owner',
+ [{'path': '/ports', 'method': 'GET'},
+ {'path': '/ports/detail', 'method': 'GET'}]),
+ policy.DocumentedRuleDefault(
+ 'baremetal:port:list_all',
+ 'rule:baremetal:port:get',
+ 'Retrieve multiple Port records',
+ [{'path': '/ports', 'method': 'GET'},
+ {'path': '/ports/detail', 'method': 'GET'}]),
+ policy.DocumentedRuleDefault(
'baremetal:port:create',
'rule:is_admin',
'Create Port records',
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index 1f619aa0e..e2f4b8e23 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -197,11 +197,11 @@ RELEASE_MAPPING = {
}
},
'master': {
- 'api': '1.60',
+ 'api': '1.61',
'rpc': '1.48',
'objects': {
'Allocation': ['1.1'],
- 'Node': ['1.32'],
+ 'Node': ['1.33', '1.32'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'DeployTemplate': ['1.1'],
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
new file mode 100644
index 000000000..9f306e574
--- /dev/null
+++ b/ironic/conductor/cleaning.py
@@ -0,0 +1,276 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functionality related to cleaning."""
+
+from oslo_log import log
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import steps as conductor_steps
+from ironic.conductor import task_manager
+from ironic.conductor import utils
+from ironic.conf import CONF
+
+LOG = log.getLogger(__name__)
+
+
+@task_manager.require_exclusive_lock
+def do_node_clean(task, clean_steps=None):
+ """Internal RPC method to perform cleaning of a node.
+
+ :param task: a TaskManager instance with an exclusive lock on its node
+ :param clean_steps: For a manual clean, the list of clean steps to
+ perform. Is None for automated cleaning (default).
+ For more information, see the clean_steps parameter
+ of :func:`ConductorManager.do_node_clean`.
+ """
+ node = task.node
+ manual_clean = clean_steps is not None
+ clean_type = 'manual' if manual_clean else 'automated'
+ LOG.debug('Starting %(type)s cleaning for node %(node)s',
+ {'type': clean_type, 'node': node.uuid})
+
+ if not manual_clean and utils.skip_automated_cleaning(node):
+ # Skip cleaning, move to AVAILABLE.
+ node.clean_step = None
+ node.save()
+
+ task.process_event('done')
+ LOG.info('Automated cleaning is disabled, node %s has been '
+ 'successfully moved to AVAILABLE state.', node.uuid)
+ return
+
+ # NOTE(dtantsur): this is only reachable during automated cleaning,
+ # for manual cleaning we verify maintenance mode earlier on.
+ if (not CONF.conductor.allow_provisioning_in_maintenance
+ and node.maintenance):
+ msg = _('Cleaning a node in maintenance mode is not allowed')
+ return utils.cleaning_error_handler(task, msg,
+ tear_down_cleaning=False)
+
+ try:
+ # NOTE(ghe): Valid power and network values are needed to perform
+ # a cleaning.
+ task.driver.power.validate(task)
+ task.driver.network.validate(task)
+ except exception.InvalidParameterValue as e:
+ msg = (_('Validation failed. Cannot clean node %(node)s. '
+ 'Error: %(msg)s') %
+ {'node': node.uuid, 'msg': e})
+ return utils.cleaning_error_handler(task, msg)
+
+ if manual_clean:
+ info = node.driver_internal_info
+ info['clean_steps'] = clean_steps
+ node.driver_internal_info = info
+ node.save()
+
+ # Do caching of bios settings if supported by driver,
+ # this will be called for both manual and automated cleaning.
+ try:
+ task.driver.bios.cache_bios_settings(task)
+ except exception.UnsupportedDriverExtension:
+ LOG.warning('BIOS settings are not supported for node %s, '
+ 'skipping', task.node.uuid)
+ # TODO(zshi) remove this check when classic drivers are removed
+ except Exception:
+ msg = (_('Caching of bios settings failed on node %(node)s. '
+ 'Continuing with node cleaning.')
+ % {'node': node.uuid})
+ LOG.exception(msg)
+
+ # Allow the deploy driver to set up the ramdisk again (necessary for
+ # IPA cleaning)
+ try:
+ prepare_result = task.driver.deploy.prepare_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
+ % {'node': node.uuid, 'e': e})
+ LOG.exception(msg)
+ return utils.cleaning_error_handler(task, msg)
+
+ if prepare_result == states.CLEANWAIT:
+ # Prepare is asynchronous, the deploy driver will need to
+ # set node.driver_internal_info['clean_steps'] and
+ # node.clean_step and then make an RPC call to
+ # continue_node_clean to start cleaning.
+
+ # For manual cleaning, the target provision state is MANAGEABLE,
+ # whereas for automated cleaning, it is AVAILABLE (the default).
+ target_state = states.MANAGEABLE if manual_clean else None
+ task.process_event('wait', target_state=target_state)
+ return
+
+ try:
+ conductor_steps.set_node_cleaning_steps(task)
+ except (exception.InvalidParameterValue,
+ exception.NodeCleaningFailure) as e:
+ msg = (_('Cannot clean node %(node)s. Error: %(msg)s')
+ % {'node': node.uuid, 'msg': e})
+ return utils.cleaning_error_handler(task, msg)
+
+ steps = node.driver_internal_info.get('clean_steps', [])
+ step_index = 0 if steps else None
+ do_next_clean_step(task, step_index)
+
+
+@task_manager.require_exclusive_lock
+def do_next_clean_step(task, step_index):
+ """Do cleaning, starting from the specified clean step.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param step_index: The first clean step in the list to execute. This
+ is the index (from 0) into the list of clean steps in the node's
+ driver_internal_info['clean_steps']. Is None if there are no steps
+ to execute.
+ """
+ node = task.node
+ # For manual cleaning, the target provision state is MANAGEABLE,
+ # whereas for automated cleaning, it is AVAILABLE.
+ manual_clean = node.target_provision_state == states.MANAGEABLE
+
+ if step_index is None:
+ steps = []
+ else:
+ steps = node.driver_internal_info['clean_steps'][step_index:]
+
+ LOG.info('Executing %(state)s on node %(node)s, remaining steps: '
+ '%(steps)s', {'node': node.uuid, 'steps': steps,
+ 'state': node.provision_state})
+
+ # Execute each step until we hit an async step or run out of steps
+ for ind, step in enumerate(steps):
+ # Save which step we're about to start so we can restart
+ # if necessary
+ node.clean_step = step
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_step_index'] = step_index + ind
+ node.driver_internal_info = driver_internal_info
+ node.save()
+ interface = getattr(task.driver, step.get('interface'))
+ LOG.info('Executing %(step)s on node %(node)s',
+ {'step': step, 'node': node.uuid})
+ try:
+ result = interface.execute_clean_step(task, step)
+ except Exception as e:
+ if isinstance(e, exception.AgentConnectionFailed):
+ if task.node.driver_internal_info.get('cleaning_reboot'):
+ LOG.info('Agent is not yet running on node %(node)s '
+ 'after cleaning reboot, waiting for agent to '
+ 'come up to run next clean step %(step)s.',
+ {'node': node.uuid, 'step': step})
+ driver_internal_info['skip_current_clean_step'] = False
+ node.driver_internal_info = driver_internal_info
+ target_state = (states.MANAGEABLE if manual_clean
+ else None)
+ task.process_event('wait', target_state=target_state)
+ return
+
+ msg = (_('Node %(node)s failed step %(step)s: '
+ '%(exc)s') %
+ {'node': node.uuid, 'exc': e,
+ 'step': node.clean_step})
+ LOG.exception(msg)
+ utils.cleaning_error_handler(task, msg)
+ return
+
+ # Check if the step is done or not. The step should return
+ # states.CLEANWAIT if the step is still being executed, or
+ # None if the step is done.
+ if result == states.CLEANWAIT:
+ # Kill this worker, the async step will make an RPC call to
+ # continue_node_clean to continue cleaning
+ LOG.info('Clean step %(step)s on node %(node)s being '
+ 'executed asynchronously, waiting for driver.',
+ {'node': node.uuid, 'step': step})
+ target_state = states.MANAGEABLE if manual_clean else None
+ task.process_event('wait', target_state=target_state)
+ return
+ elif result is not None:
+ msg = (_('While executing step %(step)s on node '
+ '%(node)s, step returned invalid value: %(val)s')
+ % {'step': step, 'node': node.uuid, 'val': result})
+ LOG.error(msg)
+ return utils.cleaning_error_handler(task, msg)
+ LOG.info('Node %(node)s finished clean step %(step)s',
+ {'node': node.uuid, 'step': step})
+
+ # Clear clean_step
+ node.clean_step = None
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_steps'] = None
+ driver_internal_info.pop('clean_step_index', None)
+ driver_internal_info.pop('cleaning_reboot', None)
+ driver_internal_info.pop('cleaning_polling', None)
+ # Remove agent_url
+ if not utils.fast_track_able(task):
+ driver_internal_info.pop('agent_url', None)
+ node.driver_internal_info = driver_internal_info
+ node.save()
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to tear down from cleaning for node %(node)s, '
+ 'reason: %(err)s')
+ % {'node': node.uuid, 'err': e})
+ LOG.exception(msg)
+ return utils.cleaning_error_handler(task, msg,
+ tear_down_cleaning=False)
+
+ LOG.info('Node %s cleaning complete', node.uuid)
+ event = 'manage' if manual_clean or node.retired else 'done'
+ # NOTE(rloo): No need to specify target prov. state; we're done
+ task.process_event(event)
+
+
+@task_manager.require_exclusive_lock
+def do_node_clean_abort(task, step_name=None):
+ """Internal method to abort an ongoing operation.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param step_name: The name of the clean step.
+ """
+ node = task.node
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ LOG.exception('Failed to tear down cleaning for node %(node)s '
+ 'after aborting the operation. Error: %(err)s',
+ {'node': node.uuid, 'err': e})
+ error_msg = _('Failed to tear down cleaning after aborting '
+ 'the operation')
+ utils.cleaning_error_handler(task, error_msg,
+ tear_down_cleaning=False,
+ set_fail_state=False)
+ return
+
+ info_message = _('Clean operation aborted for node %s') % node.uuid
+ last_error = _('By request, the clean operation was aborted')
+ if step_name:
+ msg = _(' after the completion of step "%s"') % step_name
+ last_error += msg
+ info_message += msg
+
+ node.last_error = last_error
+ node.clean_step = None
+ info = node.driver_internal_info
+ # Clear any leftover metadata about cleaning
+ info.pop('clean_step_index', None)
+ info.pop('cleaning_reboot', None)
+ info.pop('cleaning_polling', None)
+ info.pop('skip_current_clean_step', None)
+ info.pop('agent_url', None)
+ node.driver_internal_info = info
+ node.save()
+ LOG.info(info_message)
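
For context, ``do_next_clean_step()`` above receives an index that the conductor computes from ``driver_internal_info`` (the helpers now live in ``ironic/conductor/utils.py`` as ``get_node_next_clean_steps``/``get_node_next_deploy_steps``). A simplified, standalone sketch of that computation, mirroring the logic removed from the manager further below:

    # Simplified sketch of the "next step index" computation; the real helpers
    # operate on a TaskManager/node object rather than plain dicts.

    def next_step_index(driver_internal_info, current_step, step_type='clean',
                        skip_current_step=True):
        """Return the index of the next step to run, or None if none remain."""
        if not current_step:
            # First time through: all steps still need to be done.
            return 0
        ind = driver_internal_info.get('%s_step_index' % step_type)
        if ind is None:
            return None
        if skip_current_step:
            ind += 1
        if ind >= len(driver_internal_info['%s_steps' % step_type]):
            return None  # no steps left to do
        return ind

    info = {'clean_steps': [{'step': 'erase_devices', 'interface': 'deploy'},
                            {'step': 'update_firmware', 'interface': 'management'}],
            'clean_step_index': 0}
    print(next_step_index(info, info['clean_steps'][0]))  # -> 1
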
diff --git a/ironic/conductor/deployments.py b/ironic/conductor/deployments.py
new file mode 100644
index 000000000..470bed67b
--- /dev/null
+++ b/ironic/conductor/deployments.py
@@ -0,0 +1,402 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functionality related to deploying and undeploying."""
+
+import tempfile
+
+from ironic_lib import metrics_utils
+from oslo_db import exception as db_exception
+from oslo_log import log
+from oslo_utils import excutils
+from oslo_utils import versionutils
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import release_mappings as versions
+from ironic.common import states
+from ironic.common import swift
+from ironic.conductor import notification_utils as notify_utils
+from ironic.conductor import steps as conductor_steps
+from ironic.conductor import task_manager
+from ironic.conductor import utils
+from ironic.conf import CONF
+from ironic.objects import fields
+
+LOG = log.getLogger(__name__)
+
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+# NOTE(rloo) This is used to keep track of deprecation warnings that have
+# already been issued for deploy drivers that do not use deploy steps.
+_SEEN_NO_DEPLOY_STEP_DEPRECATIONS = set()
+
+
+@METRICS.timer('do_node_deploy')
+@task_manager.require_exclusive_lock
+def do_node_deploy(task, conductor_id=None, configdrive=None):
+ """Prepare the environment and deploy a node."""
+ node = task.node
+
+ try:
+ if configdrive:
+ if isinstance(configdrive, dict):
+ configdrive = utils.build_configdrive(node, configdrive)
+ _store_configdrive(node, configdrive)
+ except (exception.SwiftOperationError, exception.ConfigInvalid) as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Error while uploading the configdrive for %(node)s '
+ 'to Swift') % {'node': node.uuid},
+ _('Failed to upload the configdrive to Swift. '
+ 'Error: %s') % e,
+ clean_up=False)
+ except db_exception.DBDataError as e:
+ with excutils.save_and_reraise_exception():
+ # NOTE(hshiina): This error happens when the configdrive is
+ # too large. Remove the configdrive from the
+ # object to update DB successfully in handling
+ # the failure.
+ node.obj_reset_changes()
+ utils.deploying_error_handler(
+ task,
+ ('Error while storing the configdrive for %(node)s into '
+ 'the database: %(err)s') % {'node': node.uuid, 'err': e},
+ _("Failed to store the configdrive in the database. "
+ "%s") % e,
+ clean_up=False)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Unexpected error while preparing the configdrive for '
+ 'node %(node)s') % {'node': node.uuid},
+ _("Failed to prepare the configdrive. Exception: %s") % e,
+ traceback=True, clean_up=False)
+
+ try:
+ task.driver.deploy.prepare(task)
+ except exception.IronicException as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Error while preparing to deploy to node %(node)s: '
+ '%(err)s') % {'node': node.uuid, 'err': e},
+ _("Failed to prepare to deploy: %s") % e,
+ clean_up=False)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Unexpected error while preparing to deploy to node '
+ '%(node)s') % {'node': node.uuid},
+ _("Failed to prepare to deploy. Exception: %s") % e,
+ traceback=True, clean_up=False)
+
+ try:
+ # This gets the deploy steps (if any) and puts them in the node's
+ # driver_internal_info['deploy_steps'].
+ conductor_steps.set_node_deployment_steps(task)
+ except exception.InstanceDeployFailure as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ 'Error while getting deploy steps; cannot deploy to node '
+ '%(node)s. Error: %(err)s' % {'node': node.uuid, 'err': e},
+ _("Cannot get deploy steps; failed to deploy: %s") % e)
+
+ steps = node.driver_internal_info.get('deploy_steps', [])
+
+ new_rpc_version = True
+ release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
+ if release_ver:
+ new_rpc_version = versionutils.is_compatible('1.45',
+ release_ver['rpc'])
+
+ if not steps or not new_rpc_version:
+ # TODO(rloo): This if.. (and the above code wrt rpc version)
+ # can be deleted after the deprecation period when we no
+ # longer support drivers with no deploy steps.
+ # Note that after the deprecation period, there needs to be at least
+ # one deploy step. If none, the deployment fails.
+
+ if steps:
+ info = node.driver_internal_info
+ info.pop('deploy_steps')
+ node.driver_internal_info = info
+ node.save()
+
+ # We go back to using the old way, if:
+ # - out-of-tree driver hasn't yet converted to using deploy steps, or
+ # - we're in the middle of a rolling upgrade. This is to prevent the
+ # corner case of having new conductors with old conductors, and
+ # a node is deployed with a new conductor (via deploy steps), but
+ # after the deploy_wait, the node gets handled by an old conductor.
+ # To avoid this, we need to wait until all the conductors are new,
+ # signalled by the RPC API version being '1.45'.
+ _old_rest_of_do_node_deploy(task, conductor_id, not steps)
+ else:
+ do_next_deploy_step(task, 0, conductor_id)
+
+
+def _old_rest_of_do_node_deploy(task, conductor_id, no_deploy_steps):
+ """The rest of the do_node_deploy() if not using deploy steps.
+
+ To support out-of-tree drivers that have not yet migrated to using
+ deploy steps.
+
+ :param no_deploy_steps: Boolean; True if there are no deploy steps.
+ """
+ # TODO(rloo): This method can be deleted after the deprecation period
+ # for supporting drivers with no deploy steps.
+
+ if no_deploy_steps:
+ deploy_driver_name = task.driver.deploy.__class__.__name__
+ if deploy_driver_name not in _SEEN_NO_DEPLOY_STEP_DEPRECATIONS:
+ LOG.warning('Deploy driver %s does not support deploy steps; this '
+ 'will be required starting with the Stein release.',
+ deploy_driver_name)
+ _SEEN_NO_DEPLOY_STEP_DEPRECATIONS.add(deploy_driver_name)
+
+ node = task.node
+ try:
+ new_state = task.driver.deploy.deploy(task)
+ except exception.IronicException as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Error in deploy of node %(node)s: %(err)s' %
+ {'node': node.uuid, 'err': e}),
+ _("Failed to deploy: %s") % e)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ utils.deploying_error_handler(
+ task,
+ ('Unexpected error while deploying node %(node)s' %
+ {'node': node.uuid}),
+ _("Failed to deploy. Exception: %s") % e,
+ traceback=True)
+
+ # Update conductor_affinity to reference this conductor's ID
+ # since there may be local persistent state
+ node.conductor_affinity = conductor_id
+
+ # NOTE(deva): Some drivers may return states.DEPLOYWAIT
+ # eg. if they are waiting for a callback
+ if new_state == states.DEPLOYDONE:
+ _start_console_in_deploy(task)
+ task.process_event('done')
+ LOG.info('Successfully deployed node %(node)s with '
+ 'instance %(instance)s.',
+ {'node': node.uuid, 'instance': node.instance_uuid})
+ elif new_state == states.DEPLOYWAIT:
+ task.process_event('wait')
+ else:
+ LOG.error('Unexpected state %(state)s returned while '
+ 'deploying node %(node)s.',
+ {'state': new_state, 'node': node.uuid})
+ node.save()
+
+
+@task_manager.require_exclusive_lock
+def do_next_deploy_step(task, step_index, conductor_id):
+ """Do deployment, starting from the specified deploy step.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param step_index: The first deploy step in the list to execute. This
+ is the index (from 0) into the list of deploy steps in the node's
+ driver_internal_info['deploy_steps']. Is None if there are no steps
+ to execute.
+ """
+ node = task.node
+ if step_index is None:
+ steps = []
+ else:
+ steps = node.driver_internal_info['deploy_steps'][step_index:]
+
+ LOG.info('Executing %(state)s on node %(node)s, remaining steps: '
+ '%(steps)s', {'node': node.uuid, 'steps': steps,
+ 'state': node.provision_state})
+
+ # Execute each step until we hit an async step or run out of steps
+ for ind, step in enumerate(steps):
+ # Save which step we're about to start so we can restart
+ # if necessary
+ node.deploy_step = step
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['deploy_step_index'] = step_index + ind
+ node.driver_internal_info = driver_internal_info
+ node.save()
+ interface = getattr(task.driver, step.get('interface'))
+ LOG.info('Executing %(step)s on node %(node)s',
+ {'step': step, 'node': node.uuid})
+ try:
+ result = interface.execute_deploy_step(task, step)
+ except exception.IronicException as e:
+ if isinstance(e, exception.AgentConnectionFailed):
+ if task.node.driver_internal_info.get('deployment_reboot'):
+ LOG.info('Agent is not yet running on node %(node)s after '
+ 'deployment reboot, waiting for agent to come up '
+ 'to run next deploy step %(step)s.',
+ {'node': node.uuid, 'step': step})
+ driver_internal_info['skip_current_deploy_step'] = False
+ node.driver_internal_info = driver_internal_info
+ task.process_event('wait')
+ return
+ log_msg = ('Node %(node)s failed deploy step %(step)s. Error: '
+ '%(err)s' %
+ {'node': node.uuid, 'step': node.deploy_step, 'err': e})
+ utils.deploying_error_handler(
+ task, log_msg,
+ _("Failed to deploy: %s") % node.deploy_step)
+ return
+ except Exception as e:
+ log_msg = ('Node %(node)s failed deploy step %(step)s with '
+ 'unexpected error: %(err)s' %
+ {'node': node.uuid, 'step': node.deploy_step, 'err': e})
+ utils.deploying_error_handler(
+ task, log_msg,
+ _("Failed to deploy. Exception: %s") % e, traceback=True)
+ return
+
+ if ind == 0:
+ # We've done the very first deploy step.
+ # Update conductor_affinity to reference this conductor's ID
+ # since there may be local persistent state
+ node.conductor_affinity = conductor_id
+ node.save()
+
+ # Check if the step is done or not. The step should return
+ # states.DEPLOYWAIT if the step is still being executed, or
+ # None if the step is done.
+ # NOTE(deva): Some drivers may return states.DEPLOYWAIT
+ # eg. if they are waiting for a callback
+ if result == states.DEPLOYWAIT:
+ # Kill this worker, the async step will make an RPC call to
+ # continue_node_deploy() to continue deploying
+ LOG.info('Deploy step %(step)s on node %(node)s being '
+ 'executed asynchronously, waiting for driver.',
+ {'node': node.uuid, 'step': step})
+ task.process_event('wait')
+ return
+ elif result is not None:
+ # NOTE(rloo): This is an internal/dev error; shouldn't happen.
+ log_msg = (_('While executing deploy step %(step)s on node '
+ '%(node)s, step returned unexpected state: %(val)s')
+ % {'step': step, 'node': node.uuid, 'val': result})
+ utils.deploying_error_handler(
+ task, log_msg,
+ _("Failed to deploy: %s") % node.deploy_step)
+ return
+
+ LOG.info('Node %(node)s finished deploy step %(step)s',
+ {'node': node.uuid, 'step': step})
+
+ # Finished executing the steps. Clear deploy_step.
+ node.deploy_step = None
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['deploy_steps'] = None
+ driver_internal_info.pop('deploy_step_index', None)
+ driver_internal_info.pop('deployment_reboot', None)
+ driver_internal_info.pop('deployment_polling', None)
+ # Remove the agent_url cached from the deployment.
+ driver_internal_info.pop('agent_url', None)
+ node.driver_internal_info = driver_internal_info
+ node.save()
+
+ _start_console_in_deploy(task)
+
+ task.process_event('done')
+ LOG.info('Successfully deployed node %(node)s with '
+ 'instance %(instance)s.',
+ {'node': node.uuid, 'instance': node.instance_uuid})
+
+
+def _get_configdrive_obj_name(node):
+ """Generate the object name for the config drive."""
+ return 'configdrive-%s' % node.uuid
+
+
+def _store_configdrive(node, configdrive):
+ """Handle the storage of the config drive.
+
+ If configured, the config drive data are uploaded to a swift endpoint.
+ The Node's instance_info is updated to include either the temporary
+ Swift URL from the upload, or if no upload, the actual config drive data.
+
+ :param node: an Ironic node object.
+ :param configdrive: A gzipped and base64 encoded configdrive.
+ :raises: SwiftOperationError if an error occurs when uploading the
+ config drive to the swift endpoint.
+ :raises: ConfigInvalid if required keystone authorization credentials
+ with swift are missing.
+
+
+ """
+ if CONF.deploy.configdrive_use_object_store:
+ # NOTE(lucasagomes): No reason to use a different timeout than
+ # the one used for deploying the node
+ timeout = (CONF.conductor.configdrive_swift_temp_url_duration
+ or CONF.conductor.deploy_callback_timeout
+ # The documented default in ironic.conf.conductor
+ or 1800)
+ container = CONF.conductor.configdrive_swift_container
+ object_name = _get_configdrive_obj_name(node)
+
+ object_headers = {'X-Delete-After': str(timeout)}
+
+ with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as fileobj:
+ fileobj.write(configdrive)
+ fileobj.flush()
+
+ swift_api = swift.SwiftAPI()
+ swift_api.create_object(container, object_name, fileobj.name,
+ object_headers=object_headers)
+ configdrive = swift_api.get_temp_url(container, object_name,
+ timeout)
+
+ i_info = node.instance_info
+ i_info['configdrive'] = configdrive
+ node.instance_info = i_info
+ node.save()
+
+
+def _start_console_in_deploy(task):
+ """Start console at the end of deployment.
+
+ The console is stopped during tear down so that it is not exposed to the
+ instance user, and is restarted here at the end of deployment.
+
+ :param task: a TaskManager instance with an exclusive lock
+ """
+
+ if not task.node.console_enabled:
+ return
+
+ notify_utils.emit_console_notification(
+ task, 'console_restore', fields.NotificationStatus.START)
+ try:
+ task.driver.console.start_console(task)
+ except Exception as err:
+ msg = (_('Failed to start console while deploying the '
+ 'node %(node)s: %(err)s.') % {'node': task.node.uuid,
+ 'err': err})
+ LOG.error(msg)
+ task.node.last_error = msg
+ task.node.console_enabled = False
+ task.node.save()
+ notify_utils.emit_console_notification(
+ task, 'console_restore', fields.NotificationStatus.ERROR)
+ else:
+ notify_utils.emit_console_notification(
+ task, 'console_restore', fields.NotificationStatus.END)
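
``_store_configdrive()`` above expects configdrive content that is already gzipped and base64 encoded whenever it is handed a string rather than a dict. A tiny illustrative helper (not Ironic code) producing that encoding:

    # Illustrative helper (not part of Ironic): produce the "gzipped and base64
    # encoded" configdrive form described in the _store_configdrive() docstring.
    import base64
    import gzip

    def encode_configdrive(raw_iso_bytes):
        return base64.b64encode(gzip.compress(raw_iso_bytes)).decode('ascii')

    encoded = encode_configdrive(b'<config drive ISO contents>')
    print(encoded[:40], '...')
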
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 2f7ac081a..e01ffaf4a 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -43,18 +43,15 @@ notifying Neutron of a change, etc.
import collections
import datetime
import queue
-import tempfile
import eventlet
from futurist import periodics
from futurist import waiters
from ironic_lib import metrics_utils
-from oslo_db import exception as db_exception
from oslo_log import log
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import uuidutils
-from oslo_utils import versionutils
from ironic.common import driver_factory
from ironic.common import exception
@@ -64,11 +61,11 @@ from ironic.common.i18n import _
from ironic.common import images
from ironic.common import network
from ironic.common import nova
-from ironic.common import release_mappings as versions
from ironic.common import states
-from ironic.common import swift
from ironic.conductor import allocations
from ironic.conductor import base_manager
+from ironic.conductor import cleaning
+from ironic.conductor import deployments
from ironic.conductor import notification_utils as notify_utils
from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
@@ -87,10 +84,6 @@ METRICS = metrics_utils.get_metrics_logger(__name__)
SYNC_EXCLUDED_STATES = (states.DEPLOYWAIT, states.CLEANWAIT, states.ENROLL)
-# NOTE(rloo) This is used to keep track of deprecation warnings that have
-# already been issued for deploy drivers that do not use deploy steps.
-_SEEN_NO_DEPLOY_STEP_DEPRECATIONS = set()
-
class ConductorManager(base_manager.BaseConductorManager):
"""Ironic Conductor manager main class."""
@@ -151,6 +144,22 @@ class ConductorManager(base_manager.BaseConductorManager):
"The protected_reason field can only be set when "
"protected is True")
+ def _check_update_retired(self, node_obj, delta):
+ if 'retired' in delta:
+ if not node_obj.retired:
+ node_obj.retired_reason = None
+ elif node_obj.provision_state == states.AVAILABLE:
+ raise exception.InvalidState(
+ "Node %(node)s can not have 'retired' set in provision "
+ "state 'available', the current state is '%(state)s'" %
+ {'node': node_obj.uuid, 'state': node_obj.provision_state})
+
+ if ('retired_reason' in delta and node_obj.retired_reason and not
+ node_obj.retired):
+ raise exception.InvalidParameterValue(
+ "The retired_reason field can only be set when "
+ "retired is True")
+
@METRICS.timer('ConductorManager.update_node')
# No need to add these since they are subclasses of InvalidParameterValue:
# InterfaceNotFoundInEntrypoint
@@ -187,6 +196,7 @@ class ConductorManager(base_manager.BaseConductorManager):
node_obj.fault = None
self._check_update_protected(node_obj, delta)
+ self._check_update_retired(node_obj, delta)
# TODO(dtantsur): reconsider allowing changing some (but not all)
# interfaces for active nodes in the future.
@@ -875,18 +885,14 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
event,
callback=self._spawn_worker,
- call_args=(do_node_deploy, task, self.conductor.id,
- configdrive),
+ call_args=(deployments.do_node_deploy, task,
+ self.conductor.id, configdrive),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
raise exception.InvalidStateRequested(
action=event, node=task.node.uuid,
state=task.node.provision_state)
- def _get_node_next_deploy_steps(self, task, skip_current_step=True):
- return self._get_node_next_steps(task, 'deploy',
- skip_current_step=skip_current_step)
-
@METRICS.timer('ConductorManager.continue_node_deploy')
def continue_node_deploy(self, context, node_id):
"""RPC method to continue deploying a node.
@@ -937,7 +943,7 @@ class ConductorManager(base_manager.BaseConductorManager):
node.driver_internal_info = info
node.save()
- next_step_index = self._get_node_next_deploy_steps(
+ next_step_index = utils.get_node_next_deploy_steps(
task, skip_current_step=skip_current_step)
# TODO(rloo): When deprecation period is over and node is in
@@ -949,7 +955,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.node)
task.spawn_after(
self._spawn_worker,
- _do_next_deploy_step,
+ deployments.do_next_deploy_step,
task, next_step_index, self.conductor.id)
@METRICS.timer('ConductorManager.do_node_tear_down')
@@ -1081,63 +1087,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# Begin cleaning
task.process_event('clean')
- self._do_node_clean(task)
-
- def _get_node_next_steps(self, task, step_type,
- skip_current_step=True):
- """Get the task's node's next steps.
-
- This determines what the next (remaining) steps are, and
- returns the index into the steps list that corresponds to the
- next step. The remaining steps are determined as follows:
-
- * If no steps have been started yet, all the steps
- must be executed
- * If skip_current_step is False, the remaining steps start
- with the current step. Otherwise, the remaining steps
- start with the step after the current one.
-
- All the steps are in node.driver_internal_info['<step_type>_steps'].
- node.<step_type>_step is the current step that was just executed
- (or None, {} if no steps have been executed yet).
- node.driver_internal_info['<step_type>_step_index'] is the index
- index into the steps list (or None, doesn't exist if no steps have
- been executed yet) and corresponds to node.<step_type>_step.
-
- :param task: A TaskManager object
- :param step_type: The type of steps to process: 'clean' or 'deploy'.
- :param skip_current_step: True to skip the current step; False to
- include it.
- :returns: index of the next step; None if there are none to execute.
-
- """
- valid_types = set(['clean', 'deploy'])
- if step_type not in valid_types:
- # NOTE(rloo): No need to i18n this, since this would be a
- # developer error; it isn't user-facing.
- raise exception.Invalid(
- 'step_type must be one of %(valid)s, not %(step)s'
- % {'valid': valid_types, 'step': step_type})
- node = task.node
- if not getattr(node, '%s_step' % step_type):
- # first time through, all steps need to be done. Return the
- # index of the first step in the list.
- return 0
-
- ind = node.driver_internal_info.get('%s_step_index' % step_type)
- if ind is None:
- return None
-
- if skip_current_step:
- ind += 1
- if ind >= len(node.driver_internal_info['%s_steps' % step_type]):
- # no steps left to do
- ind = None
- return ind
-
- def _get_node_next_clean_steps(self, task, skip_current_step=True):
- return self._get_node_next_steps(task, 'clean',
- skip_current_step=skip_current_step)
+ cleaning.do_node_clean(task)
@METRICS.timer('ConductorManager.do_node_clean')
@messaging.expected_exceptions(exception.InvalidParameterValue,
@@ -1184,10 +1134,10 @@ class ConductorManager(base_manager.BaseConductorManager):
raise exception.NodeInMaintenance(op=_('cleaning'),
node=node.uuid)
- # NOTE(rloo): _do_node_clean() will also make similar calls to
- # validate power & network, but we are doing it again here so that
- # the user gets immediate feedback of any issues. This behaviour
- # (of validating) is consistent with other methods like
+ # NOTE(rloo): cleaning.do_node_clean() will also make similar calls
+ # to validate power & network, but we are doing it again here so
+ # that the user gets immediate feedback of any issues. This
+ # behaviour (of validating) is consistent with other methods like
# self.do_node_deploy().
try:
task.driver.power.validate(task)
@@ -1202,7 +1152,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
'clean',
callback=self._spawn_worker,
- call_args=(self._do_node_clean, task, clean_steps),
+ call_args=(cleaning.do_node_clean, task, clean_steps),
err_handler=utils.provisioning_error_handler,
target_state=states.MANAGEABLE)
except exception.InvalidState:
@@ -1259,7 +1209,7 @@ class ConductorManager(base_manager.BaseConductorManager):
node.driver_internal_info = info
node.save()
- next_step_index = self._get_node_next_clean_steps(
+ next_step_index = utils.get_node_next_clean_steps(
task, skip_current_step=skip_current_step)
# If this isn't the final clean step in the cleaning operation
@@ -1275,7 +1225,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
'abort',
callback=self._spawn_worker,
- call_args=(self._do_node_clean_abort,
+ call_args=(cleaning.do_node_clean_abort,
task, step_name),
err_handler=utils.provisioning_error_handler,
target_state=target_state)
@@ -1294,217 +1244,10 @@ class ConductorManager(base_manager.BaseConductorManager):
task.node)
task.spawn_after(
self._spawn_worker,
- self._do_next_clean_step,
+ cleaning.do_next_clean_step,
task, next_step_index)
@task_manager.require_exclusive_lock
- def _do_node_clean(self, task, clean_steps=None):
- """Internal RPC method to perform cleaning of a node.
-
- :param task: a TaskManager instance with an exclusive lock on its node
- :param clean_steps: For a manual clean, the list of clean steps to
- perform. Is None For automated cleaning (default).
- For more information, see the clean_steps parameter
- of :func:`ConductorManager.do_node_clean`.
- """
- node = task.node
- manual_clean = clean_steps is not None
- clean_type = 'manual' if manual_clean else 'automated'
- LOG.debug('Starting %(type)s cleaning for node %(node)s',
- {'type': clean_type, 'node': node.uuid})
-
- if not manual_clean and utils.skip_automated_cleaning(node):
- # Skip cleaning, move to AVAILABLE.
- node.clean_step = None
- node.save()
-
- task.process_event('done')
- LOG.info('Automated cleaning is disabled, node %s has been '
- 'successfully moved to AVAILABLE state.', node.uuid)
- return
-
- # NOTE(dtantsur): this is only reachable during automated cleaning,
- # for manual cleaning we verify maintenance mode earlier on.
- if (not CONF.conductor.allow_provisioning_in_maintenance
- and node.maintenance):
- msg = _('Cleaning a node in maintenance mode is not allowed')
- return utils.cleaning_error_handler(task, msg,
- tear_down_cleaning=False)
-
- try:
- # NOTE(ghe): Valid power and network values are needed to perform
- # a cleaning.
- task.driver.power.validate(task)
- task.driver.network.validate(task)
- except exception.InvalidParameterValue as e:
- msg = (_('Validation failed. Cannot clean node %(node)s. '
- 'Error: %(msg)s') %
- {'node': node.uuid, 'msg': e})
- return utils.cleaning_error_handler(task, msg)
-
- if manual_clean:
- info = node.driver_internal_info
- info['clean_steps'] = clean_steps
- node.driver_internal_info = info
- node.save()
-
- # Do caching of bios settings if supported by driver,
- # this will be called for both manual and automated cleaning.
- try:
- task.driver.bios.cache_bios_settings(task)
- except exception.UnsupportedDriverExtension:
- LOG.warning('BIOS settings are not supported for node %s, '
- 'skipping', task.node.uuid)
- # TODO(zshi) remove this check when classic drivers are removed
- except Exception:
- msg = (_('Caching of bios settings failed on node %(node)s. '
- 'Continuing with node cleaning.')
- % {'node': node.uuid})
- LOG.exception(msg)
-
- # Allow the deploy driver to set up the ramdisk again (necessary for
- # IPA cleaning)
- try:
- prepare_result = task.driver.deploy.prepare_cleaning(task)
- except Exception as e:
- msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
- % {'node': node.uuid, 'e': e})
- LOG.exception(msg)
- return utils.cleaning_error_handler(task, msg)
-
- if prepare_result == states.CLEANWAIT:
- # Prepare is asynchronous, the deploy driver will need to
- # set node.driver_internal_info['clean_steps'] and
- # node.clean_step and then make an RPC call to
- # continue_node_clean to start cleaning.
-
- # For manual cleaning, the target provision state is MANAGEABLE,
- # whereas for automated cleaning, it is AVAILABLE (the default).
- target_state = states.MANAGEABLE if manual_clean else None
- task.process_event('wait', target_state=target_state)
- return
-
- try:
- conductor_steps.set_node_cleaning_steps(task)
- except (exception.InvalidParameterValue,
- exception.NodeCleaningFailure) as e:
- msg = (_('Cannot clean node %(node)s. Error: %(msg)s')
- % {'node': node.uuid, 'msg': e})
- return utils.cleaning_error_handler(task, msg)
-
- steps = node.driver_internal_info.get('clean_steps', [])
- step_index = 0 if steps else None
- self._do_next_clean_step(task, step_index)
-
- @task_manager.require_exclusive_lock
- def _do_next_clean_step(self, task, step_index):
- """Do cleaning, starting from the specified clean step.
-
- :param task: a TaskManager instance with an exclusive lock
- :param step_index: The first clean step in the list to execute. This
- is the index (from 0) into the list of clean steps in the node's
- driver_internal_info['clean_steps']. Is None if there are no steps
- to execute.
- """
- node = task.node
- # For manual cleaning, the target provision state is MANAGEABLE,
- # whereas for automated cleaning, it is AVAILABLE.
- manual_clean = node.target_provision_state == states.MANAGEABLE
-
- if step_index is None:
- steps = []
- else:
- steps = node.driver_internal_info['clean_steps'][step_index:]
-
- LOG.info('Executing %(state)s on node %(node)s, remaining steps: '
- '%(steps)s', {'node': node.uuid, 'steps': steps,
- 'state': node.provision_state})
-
- # Execute each step until we hit an async step or run out of steps
- for ind, step in enumerate(steps):
- # Save which step we're about to start so we can restart
- # if necessary
- node.clean_step = step
- driver_internal_info = node.driver_internal_info
- driver_internal_info['clean_step_index'] = step_index + ind
- node.driver_internal_info = driver_internal_info
- node.save()
- interface = getattr(task.driver, step.get('interface'))
- LOG.info('Executing %(step)s on node %(node)s',
- {'step': step, 'node': node.uuid})
- try:
- result = interface.execute_clean_step(task, step)
- except Exception as e:
- if isinstance(e, exception.AgentConnectionFailed):
- if task.node.driver_internal_info.get('cleaning_reboot'):
- LOG.info('Agent is not yet running on node %(node)s '
- 'after cleaning reboot, waiting for agent to '
- 'come up to run next clean step %(step)s.',
- {'node': node.uuid, 'step': step})
- driver_internal_info['skip_current_clean_step'] = False
- node.driver_internal_info = driver_internal_info
- target_state = (states.MANAGEABLE if manual_clean
- else None)
- task.process_event('wait', target_state=target_state)
- return
-
- msg = (_('Node %(node)s failed step %(step)s: '
- '%(exc)s') %
- {'node': node.uuid, 'exc': e,
- 'step': node.clean_step})
- LOG.exception(msg)
- utils.cleaning_error_handler(task, msg)
- return
-
- # Check if the step is done or not. The step should return
- # states.CLEANWAIT if the step is still being executed, or
- # None if the step is done.
- if result == states.CLEANWAIT:
- # Kill this worker, the async step will make an RPC call to
- # continue_node_clean to continue cleaning
- LOG.info('Clean step %(step)s on node %(node)s being '
- 'executed asynchronously, waiting for driver.',
- {'node': node.uuid, 'step': step})
- target_state = states.MANAGEABLE if manual_clean else None
- task.process_event('wait', target_state=target_state)
- return
- elif result is not None:
- msg = (_('While executing step %(step)s on node '
- '%(node)s, step returned invalid value: %(val)s')
- % {'step': step, 'node': node.uuid, 'val': result})
- LOG.error(msg)
- return utils.cleaning_error_handler(task, msg)
- LOG.info('Node %(node)s finished clean step %(step)s',
- {'node': node.uuid, 'step': step})
-
- # Clear clean_step
- node.clean_step = None
- driver_internal_info = node.driver_internal_info
- driver_internal_info['clean_steps'] = None
- driver_internal_info.pop('clean_step_index', None)
- driver_internal_info.pop('cleaning_reboot', None)
- driver_internal_info.pop('cleaning_polling', None)
- # Remove agent_url
- if not utils.fast_track_able(task):
- driver_internal_info.pop('agent_url', None)
- node.driver_internal_info = driver_internal_info
- node.save()
- try:
- task.driver.deploy.tear_down_cleaning(task)
- except Exception as e:
- msg = (_('Failed to tear down from cleaning for node %(node)s, '
- 'reason: %(err)s')
- % {'node': node.uuid, 'err': e})
- LOG.exception(msg)
- return utils.cleaning_error_handler(task, msg,
- tear_down_cleaning=False)
-
- LOG.info('Node %s cleaning complete', node.uuid)
- event = 'manage' if manual_clean else 'done'
- # NOTE(rloo): No need to specify target prov. state; we're done
- task.process_event(event)
-
- @task_manager.require_exclusive_lock
def _do_node_verify(self, task):
"""Internal method to perform power credentials verification."""
node = task.node
@@ -1540,47 +1283,6 @@ class ConductorManager(base_manager.BaseConductorManager):
node.last_error = error
task.process_event('fail')
- @task_manager.require_exclusive_lock
- def _do_node_clean_abort(self, task, step_name=None):
- """Internal method to abort an ongoing operation.
-
- :param task: a TaskManager instance with an exclusive lock
- :param step_name: The name of the clean step.
- """
- node = task.node
- try:
- task.driver.deploy.tear_down_cleaning(task)
- except Exception as e:
- LOG.exception('Failed to tear down cleaning for node %(node)s '
- 'after aborting the operation. Error: %(err)s',
- {'node': node.uuid, 'err': e})
- error_msg = _('Failed to tear down cleaning after aborting '
- 'the operation')
- utils.cleaning_error_handler(task, error_msg,
- tear_down_cleaning=False,
- set_fail_state=False)
- return
-
- info_message = _('Clean operation aborted for node %s') % node.uuid
- last_error = _('By request, the clean operation was aborted')
- if step_name:
- msg = _(' after the completion of step "%s"') % step_name
- last_error += msg
- info_message += msg
-
- node.last_error = last_error
- node.clean_step = None
- info = node.driver_internal_info
- # Clear any leftover metadata about cleaning
- info.pop('clean_step_index', None)
- info.pop('cleaning_reboot', None)
- info.pop('cleaning_polling', None)
- info.pop('skip_current_clean_step', None)
- info.pop('agent_url', None)
- node.driver_internal_info = info
- node.save()
- LOG.info(info_message)
-
@METRICS.timer('ConductorManager.do_provisioning_action')
@messaging.expected_exceptions(exception.NoFreeConductorWorker,
exception.NodeLocked,
@@ -1613,10 +1315,13 @@ class ConductorManager(base_manager.BaseConductorManager):
and node.maintenance):
raise exception.NodeInMaintenance(op=_('providing'),
node=node.uuid)
+            if node.retired:
+ raise exception.NodeIsRetired(op=_('providing'),
+ node=node.uuid)
task.process_event(
'provide',
callback=self._spawn_worker,
- call_args=(self._do_node_clean, task),
+ call_args=(cleaning.do_node_clean, task),
err_handler=utils.provisioning_error_handler)
return
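Aside: the guard added above rejects a 'provide' action up front for retired nodes, before any cleaning is scheduled. A minimal illustrative sketch of that behaviour (it reuses the NodeIsRetired exception introduced by this change; FakeNode is a stand-in, not an ironic object):

    from ironic.common import exception

    class FakeNode:  # stand-in for objects.Node
        uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
        retired = True

    def provide(node):
        # Mirrors the check added in do_provisioning_action(): a retired
        # node can never be moved back to AVAILABLE.
        if node.retired:
            raise exception.NodeIsRetired(op='providing', node=node.uuid)
        # ...otherwise cleaning.do_node_clean(task) would be spawned...

    # provide(FakeNode())  -> raises exception.NodeIsRetired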
@@ -1688,7 +1393,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
'abort',
callback=self._spawn_worker,
- call_args=(self._do_node_clean_abort, task),
+ call_args=(cleaning.do_node_clean_abort, task),
err_handler=utils.provisioning_error_handler,
target_state=target_state)
return
@@ -3660,367 +3365,6 @@ def get_vendor_passthru_metadata(route_dict):
return d
-def _get_configdrive_obj_name(node):
- """Generate the object name for the config drive."""
- return 'configdrive-%s' % node.uuid
-
-
-def _store_configdrive(node, configdrive):
- """Handle the storage of the config drive.
-
- If configured, the config drive data are uploaded to a swift endpoint.
- The Node's instance_info is updated to include either the temporary
- Swift URL from the upload, or if no upload, the actual config drive data.
-
- :param node: an Ironic node object.
- :param configdrive: A gzipped and base64 encoded configdrive.
- :raises: SwiftOperationError if an error occur when uploading the
- config drive to the swift endpoint.
- :raises: ConfigInvalid if required keystone authorization credentials
- with swift are missing.
-
-
- """
- if CONF.deploy.configdrive_use_object_store:
- # NOTE(lucasagomes): No reason to use a different timeout than
- # the one used for deploying the node
- timeout = (CONF.conductor.configdrive_swift_temp_url_duration
- or CONF.conductor.deploy_callback_timeout
- # The documented default in ironic.conf.conductor
- or 1800)
- container = CONF.conductor.configdrive_swift_container
- object_name = _get_configdrive_obj_name(node)
-
- object_headers = {'X-Delete-After': str(timeout)}
-
- with tempfile.NamedTemporaryFile(dir=CONF.tempdir) as fileobj:
- fileobj.write(configdrive)
- fileobj.flush()
-
- swift_api = swift.SwiftAPI()
- swift_api.create_object(container, object_name, fileobj.name,
- object_headers=object_headers)
- configdrive = swift_api.get_temp_url(container, object_name,
- timeout)
-
- i_info = node.instance_info
- i_info['configdrive'] = configdrive
- node.instance_info = i_info
- node.save()
-
-
-@METRICS.timer('do_node_deploy')
-@task_manager.require_exclusive_lock
-def do_node_deploy(task, conductor_id=None, configdrive=None):
- """Prepare the environment and deploy a node."""
- node = task.node
-
- try:
- if configdrive:
- if isinstance(configdrive, dict):
- configdrive = utils.build_configdrive(node, configdrive)
- _store_configdrive(node, configdrive)
- except (exception.SwiftOperationError, exception.ConfigInvalid) as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Error while uploading the configdrive for %(node)s '
- 'to Swift') % {'node': node.uuid},
- _('Failed to upload the configdrive to Swift. '
- 'Error: %s') % e,
- clean_up=False)
- except db_exception.DBDataError as e:
- with excutils.save_and_reraise_exception():
- # NOTE(hshiina): This error happens when the configdrive is
- # too large. Remove the configdrive from the
- # object to update DB successfully in handling
- # the failure.
- node.obj_reset_changes()
- utils.deploying_error_handler(
- task,
- ('Error while storing the configdrive for %(node)s into '
- 'the database: %(err)s') % {'node': node.uuid, 'err': e},
- _("Failed to store the configdrive in the database. "
- "%s") % e,
- clean_up=False)
- except Exception as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Unexpected error while preparing the configdrive for '
- 'node %(node)s') % {'node': node.uuid},
- _("Failed to prepare the configdrive. Exception: %s") % e,
- traceback=True, clean_up=False)
-
- try:
- task.driver.deploy.prepare(task)
- except exception.IronicException as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Error while preparing to deploy to node %(node)s: '
- '%(err)s') % {'node': node.uuid, 'err': e},
- _("Failed to prepare to deploy: %s") % e,
- clean_up=False)
- except Exception as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Unexpected error while preparing to deploy to node '
- '%(node)s') % {'node': node.uuid},
- _("Failed to prepare to deploy. Exception: %s") % e,
- traceback=True, clean_up=False)
-
- try:
- # This gets the deploy steps (if any) and puts them in the node's
- # driver_internal_info['deploy_steps'].
- conductor_steps.set_node_deployment_steps(task)
- except exception.InstanceDeployFailure as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- 'Error while getting deploy steps; cannot deploy to node '
- '%(node)s. Error: %(err)s' % {'node': node.uuid, 'err': e},
- _("Cannot get deploy steps; failed to deploy: %s") % e)
-
- steps = node.driver_internal_info.get('deploy_steps', [])
-
- new_rpc_version = True
- release_ver = versions.RELEASE_MAPPING.get(CONF.pin_release_version)
- if release_ver:
- new_rpc_version = versionutils.is_compatible('1.45',
- release_ver['rpc'])
-
- if not steps or not new_rpc_version:
- # TODO(rloo): This if.. (and the above code wrt rpc version)
- # can be deleted after the deprecation period when we no
- # longer support drivers with no deploy steps.
- # Note that after the deprecation period, there needs to be at least
- # one deploy step. If none, the deployment fails.
-
- if steps:
- info = node.driver_internal_info
- info.pop('deploy_steps')
- node.driver_internal_info = info
- node.save()
-
- # We go back to using the old way, if:
- # - out-of-tree driver hasn't yet converted to using deploy steps, or
- # - we're in the middle of a rolling upgrade. This is to prevent the
- # corner case of having new conductors with old conductors, and
- # a node is deployed with a new conductor (via deploy steps), but
- # after the deploy_wait, the node gets handled by an old conductor.
- # To avoid this, we need to wait until all the conductors are new,
- # signalled by the RPC API version being '1.45'.
- _old_rest_of_do_node_deploy(task, conductor_id, not steps)
- else:
- _do_next_deploy_step(task, 0, conductor_id)
-
-
-def _old_rest_of_do_node_deploy(task, conductor_id, no_deploy_steps):
- """The rest of the do_node_deploy() if not using deploy steps.
-
- To support out-of-tree drivers that have not yet migrated to using
- deploy steps.
-
- :param no_deploy_steps: Boolean; True if there are no deploy steps.
- """
- # TODO(rloo): This method can be deleted after the deprecation period
- # for supporting drivers with no deploy steps.
-
- if no_deploy_steps:
- deploy_driver_name = task.driver.deploy.__class__.__name__
- if deploy_driver_name not in _SEEN_NO_DEPLOY_STEP_DEPRECATIONS:
- LOG.warning('Deploy driver %s does not support deploy steps; this '
- 'will be required starting with the Stein release.',
- deploy_driver_name)
- _SEEN_NO_DEPLOY_STEP_DEPRECATIONS.add(deploy_driver_name)
-
- node = task.node
- try:
- new_state = task.driver.deploy.deploy(task)
- except exception.IronicException as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Error in deploy of node %(node)s: %(err)s' %
- {'node': node.uuid, 'err': e}),
- _("Failed to deploy: %s") % e)
- except Exception as e:
- with excutils.save_and_reraise_exception():
- utils.deploying_error_handler(
- task,
- ('Unexpected error while deploying node %(node)s' %
- {'node': node.uuid}),
- _("Failed to deploy. Exception: %s") % e,
- traceback=True)
-
- # Update conductor_affinity to reference this conductor's ID
- # since there may be local persistent state
- node.conductor_affinity = conductor_id
-
- # NOTE(deva): Some drivers may return states.DEPLOYWAIT
- # eg. if they are waiting for a callback
- if new_state == states.DEPLOYDONE:
- _start_console_in_deploy(task)
- task.process_event('done')
- LOG.info('Successfully deployed node %(node)s with '
- 'instance %(instance)s.',
- {'node': node.uuid, 'instance': node.instance_uuid})
- elif new_state == states.DEPLOYWAIT:
- task.process_event('wait')
- else:
- LOG.error('Unexpected state %(state)s returned while '
- 'deploying node %(node)s.',
- {'state': new_state, 'node': node.uuid})
- node.save()
-
-
-@task_manager.require_exclusive_lock
-def _do_next_deploy_step(task, step_index, conductor_id):
- """Do deployment, starting from the specified deploy step.
-
- :param task: a TaskManager instance with an exclusive lock
- :param step_index: The first deploy step in the list to execute. This
- is the index (from 0) into the list of deploy steps in the node's
- driver_internal_info['deploy_steps']. Is None if there are no steps
- to execute.
- """
- node = task.node
- if step_index is None:
- steps = []
- else:
- steps = node.driver_internal_info['deploy_steps'][step_index:]
-
- LOG.info('Executing %(state)s on node %(node)s, remaining steps: '
- '%(steps)s', {'node': node.uuid, 'steps': steps,
- 'state': node.provision_state})
-
- # Execute each step until we hit an async step or run out of steps
- for ind, step in enumerate(steps):
- # Save which step we're about to start so we can restart
- # if necessary
- node.deploy_step = step
- driver_internal_info = node.driver_internal_info
- driver_internal_info['deploy_step_index'] = step_index + ind
- node.driver_internal_info = driver_internal_info
- node.save()
- interface = getattr(task.driver, step.get('interface'))
- LOG.info('Executing %(step)s on node %(node)s',
- {'step': step, 'node': node.uuid})
- try:
- result = interface.execute_deploy_step(task, step)
- except exception.IronicException as e:
- if isinstance(e, exception.AgentConnectionFailed):
- if task.node.driver_internal_info.get('deployment_reboot'):
- LOG.info('Agent is not yet running on node %(node)s after '
- 'deployment reboot, waiting for agent to come up '
- 'to run next deploy step %(step)s.',
- {'node': node.uuid, 'step': step})
- driver_internal_info['skip_current_deploy_step'] = False
- node.driver_internal_info = driver_internal_info
- task.process_event('wait')
- return
- log_msg = ('Node %(node)s failed deploy step %(step)s. Error: '
- '%(err)s' %
- {'node': node.uuid, 'step': node.deploy_step, 'err': e})
- utils.deploying_error_handler(
- task, log_msg,
- _("Failed to deploy: %s") % node.deploy_step)
- return
- except Exception as e:
- log_msg = ('Node %(node)s failed deploy step %(step)s with '
- 'unexpected error: %(err)s' %
- {'node': node.uuid, 'step': node.deploy_step, 'err': e})
- utils.deploying_error_handler(
- task, log_msg,
- _("Failed to deploy. Exception: %s") % e, traceback=True)
- return
-
- if ind == 0:
- # We've done the very first deploy step.
- # Update conductor_affinity to reference this conductor's ID
- # since there may be local persistent state
- node.conductor_affinity = conductor_id
- node.save()
-
- # Check if the step is done or not. The step should return
- # states.DEPLOYWAIT if the step is still being executed, or
- # None if the step is done.
- # NOTE(deva): Some drivers may return states.DEPLOYWAIT
- # eg. if they are waiting for a callback
- if result == states.DEPLOYWAIT:
- # Kill this worker, the async step will make an RPC call to
- # continue_node_deploy() to continue deploying
- LOG.info('Deploy step %(step)s on node %(node)s being '
- 'executed asynchronously, waiting for driver.',
- {'node': node.uuid, 'step': step})
- task.process_event('wait')
- return
- elif result is not None:
- # NOTE(rloo): This is an internal/dev error; shouldn't happen.
- log_msg = (_('While executing deploy step %(step)s on node '
- '%(node)s, step returned unexpected state: %(val)s')
- % {'step': step, 'node': node.uuid, 'val': result})
- utils.deploying_error_handler(
- task, log_msg,
- _("Failed to deploy: %s") % node.deploy_step)
- return
-
- LOG.info('Node %(node)s finished deploy step %(step)s',
- {'node': node.uuid, 'step': step})
-
- # Finished executing the steps. Clear deploy_step.
- node.deploy_step = None
- driver_internal_info = node.driver_internal_info
- driver_internal_info['deploy_steps'] = None
- driver_internal_info.pop('deploy_step_index', None)
- driver_internal_info.pop('deployment_reboot', None)
- driver_internal_info.pop('deployment_polling', None)
- # Remove the agent_url cached from the deployment.
- driver_internal_info.pop('agent_url', None)
- node.driver_internal_info = driver_internal_info
- node.save()
-
- _start_console_in_deploy(task)
-
- task.process_event('done')
- LOG.info('Successfully deployed node %(node)s with '
- 'instance %(instance)s.',
- {'node': node.uuid, 'instance': node.instance_uuid})
-
-
-def _start_console_in_deploy(task):
- """Start console at the end of deployment.
-
- Console is stopped at tearing down not to be exposed to an instance user.
- Then, restart at deployment.
-
- :param task: a TaskManager instance with an exclusive lock
- """
-
- if not task.node.console_enabled:
- return
-
- notify_utils.emit_console_notification(
- task, 'console_restore', fields.NotificationStatus.START)
- try:
- task.driver.console.start_console(task)
- except Exception as err:
- msg = (_('Failed to start console while deploying the '
- 'node %(node)s: %(err)s.') % {'node': task.node.uuid,
- 'err': err})
- LOG.error(msg)
- task.node.last_error = msg
- task.node.console_enabled = False
- task.node.save()
- notify_utils.emit_console_notification(
- task, 'console_restore', fields.NotificationStatus.ERROR)
- else:
- notify_utils.emit_console_notification(
- task, 'console_restore', fields.NotificationStatus.END)
-
-
@task_manager.require_exclusive_lock
def handle_sync_power_state_max_retries_exceeded(task, actual_power_state,
exception=None):
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index 907b2f991..9a64151d3 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -943,3 +943,65 @@ def remove_agent_url(node):
info = node.driver_internal_info
info.pop('agent_url', None)
node.driver_internal_info = info
+
+
+def _get_node_next_steps(task, step_type, skip_current_step=True):
+ """Get the task's node's next steps.
+
+ This determines what the next (remaining) steps are, and
+ returns the index into the steps list that corresponds to the
+ next step. The remaining steps are determined as follows:
+
+ * If no steps have been started yet, all the steps
+ must be executed
+ * If skip_current_step is False, the remaining steps start
+ with the current step. Otherwise, the remaining steps
+ start with the step after the current one.
+
+ All the steps are in node.driver_internal_info['<step_type>_steps'].
+ node.<step_type>_step is the current step that was just executed
+ (or None, {} if no steps have been executed yet).
+ node.driver_internal_info['<step_type>_step_index'] is the index
+    into the steps list (or None, doesn't exist if no steps have
+ been executed yet) and corresponds to node.<step_type>_step.
+
+ :param task: A TaskManager object
+ :param step_type: The type of steps to process: 'clean' or 'deploy'.
+ :param skip_current_step: True to skip the current step; False to
+ include it.
+ :returns: index of the next step; None if there are none to execute.
+
+ """
+ valid_types = set(['clean', 'deploy'])
+ if step_type not in valid_types:
+ # NOTE(rloo): No need to i18n this, since this would be a
+ # developer error; it isn't user-facing.
+ raise exception.Invalid(
+ 'step_type must be one of %(valid)s, not %(step)s'
+ % {'valid': valid_types, 'step': step_type})
+ node = task.node
+ if not getattr(node, '%s_step' % step_type):
+ # first time through, all steps need to be done. Return the
+ # index of the first step in the list.
+ return 0
+
+ ind = node.driver_internal_info.get('%s_step_index' % step_type)
+ if ind is None:
+ return None
+
+ if skip_current_step:
+ ind += 1
+ if ind >= len(node.driver_internal_info['%s_steps' % step_type]):
+ # no steps left to do
+ ind = None
+ return ind
+
+
+def get_node_next_clean_steps(task, skip_current_step=True):
+ return _get_node_next_steps(task, 'clean',
+ skip_current_step=skip_current_step)
+
+
+def get_node_next_deploy_steps(task, skip_current_step=True):
+ return _get_node_next_steps(task, 'deploy',
+ skip_current_step=skip_current_step)
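Aside: a small usage sketch of the relocated helpers, showing the index arithmetic described in the docstring (FakeNode and FakeTask are stand-ins, not ironic classes):

    class FakeNode:  # stand-in for objects.Node
        clean_step = {'step': 'erase_devices', 'interface': 'deploy'}
        driver_internal_info = {
            'clean_steps': [
                {'step': 'erase_devices', 'interface': 'deploy'},
                {'step': 'update_firmware', 'interface': 'management'}],
            'clean_step_index': 0,
        }

    class FakeTask:  # stand-in for a TaskManager
        node = FakeNode()

    # get_node_next_clean_steps(FakeTask())                          -> 1
    # get_node_next_clean_steps(FakeTask(), skip_current_step=False) -> 0
    # With no clean_step set the helper returns 0 (start from the top);
    # past the last step it returns None.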
diff --git a/ironic/db/api.py b/ironic/db/api.py
index 9ad5e2979..33561296e 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -56,6 +56,7 @@ class Connection(object, metaclass=abc.ABCMeta):
:reserved: True | False
:reserved_by_any_of: [conductor1, conductor2]
:maintenance: True | False
+ :retired: True | False
:chassis_uuid: uuid of chassis
:driver: driver's name
:provision_state: provision state of node
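Aside: a hedged usage sketch of the new filter key, assuming the existing get_node_list(filters=...) call signature on the DB API:

    from ironic.db import api as db_api

    dbapi = db_api.get_instance()
    # Nodes explicitly flagged for retirement, newest first.
    retired_nodes = dbapi.get_node_list(filters={'retired': True},
                                        sort_key='created_at',
                                        sort_dir='desc')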
diff --git a/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py b/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py
new file mode 100644
index 000000000..027e7659d
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/cd2c80feb331_add_node_retired_field.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add nodes.retired field
+
+Revision ID: cd2c80feb331
+Revises: ce6c4b3cf5a2
+Create Date: 2020-01-16 12:51:13.866882
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'cd2c80feb331'
+down_revision = 'ce6c4b3cf5a2'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('retired', sa.Boolean(), nullable=True,
+ server_default=sa.false()))
+ op.add_column('nodes', sa.Column('retired_reason', sa.Text(),
+ nullable=True))
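Aside: the migration above only defines upgrade(). For reference, an illustrative inverse (not part of the change as merged):

    def downgrade():
        # Illustrative inverse of upgrade(); drops the two new columns.
        op.drop_column('nodes', 'retired_reason')
        op.drop_column('nodes', 'retired')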
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 8951eff3b..f072856bb 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -149,6 +149,12 @@ def add_port_filter_by_node(query, value):
return query.filter(models.Node.uuid == value)
+def add_port_filter_by_node_owner(query, value):
+ query = query.join(models.Node,
+ models.Port.node_id == models.Node.id)
+ return query.filter(models.Node.owner == value)
+
+
def add_portgroup_filter(query, value):
"""Adds a portgroup-specific filter to a query.
@@ -279,9 +285,10 @@ def _zip_matching(a, b, key):
class Connection(api.Connection):
"""SqlAlchemy connection."""
- _NODE_QUERY_FIELDS = {'console_enabled', 'maintenance', 'driver',
- 'resource_class', 'provision_state', 'uuid', 'id',
- 'fault', 'conductor_group', 'owner'}
+ _NODE_QUERY_FIELDS = {'console_enabled', 'maintenance', 'retired',
+ 'driver', 'resource_class', 'provision_state',
+ 'uuid', 'id', 'fault', 'conductor_group',
+ 'owner'}
_NODE_IN_QUERY_FIELDS = {'%s_in' % field: field
for field in ('uuid', 'provision_state')}
_NODE_NON_NULL_FILTERS = {'associated': 'instance_uuid',
@@ -671,29 +678,38 @@ class Connection(api.Connection):
except NoResultFound:
raise exception.PortNotFound(port=port_uuid)
- def get_port_by_address(self, address):
+ def get_port_by_address(self, address, owner=None):
query = model_query(models.Port).filter_by(address=address)
+ if owner:
+ query = add_port_filter_by_node_owner(query, owner)
try:
return query.one()
except NoResultFound:
raise exception.PortNotFound(port=address)
def get_port_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, owner=None):
+ query = model_query(models.Port)
+ if owner:
+ query = add_port_filter_by_node_owner(query, owner)
return _paginate_query(models.Port, limit, marker,
- sort_key, sort_dir)
+ sort_key, sort_dir, query)
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, owner=None):
query = model_query(models.Port)
query = query.filter_by(node_id=node_id)
+ if owner:
+ query = add_port_filter_by_node_owner(query, owner)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, owner=None):
query = model_query(models.Port)
query = query.filter_by(portgroup_id=portgroup_id)
+ if owner:
+ query = add_port_filter_by_node_owner(query, owner)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
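Aside: a minimal sketch of how the new owner argument scopes results (the owner value is assumed to be the string stored on the node, e.g. a project ID):

    from ironic.db import api as db_api

    dbapi = db_api.get_instance()
    # Only ports whose parent node has owner == '12345' are returned;
    # the join is performed by add_port_filter_by_node_owner() above.
    ports = dbapi.get_port_list(owner='12345', sort_key='address')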
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index dc5127e3e..863183bf5 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -193,6 +193,9 @@ class Node(Base):
network_interface = Column(String(255), nullable=True)
raid_interface = Column(String(255), nullable=True)
rescue_interface = Column(String(255), nullable=True)
+ retired = Column(Boolean, nullable=True, default=False,
+ server_default=false())
+ retired_reason = Column(Text, nullable=True)
storage_interface = Column(String(255), nullable=True)
power_interface = Column(String(255), nullable=True)
vendor_interface = Column(String(255), nullable=True)
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index 3b5c733c7..9e26e0c4b 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -857,10 +857,31 @@ def get_boot_option(node):
:returns: A string representing the boot option type. Defaults to
'netboot'.
"""
+
+ # NOTE(TheJulia): Software raid always implies local deployment
+ if is_software_raid(node):
+ return 'local'
capabilities = utils.parse_instance_info_capabilities(node)
return capabilities.get('boot_option', get_default_boot_option()).lower()
+def is_software_raid(node):
+ """Determine if software raid is in use for the deployment.
+
+ :param node: A single Node.
+ :returns: A boolean value of True when software raid is in use,
+ otherwise False
+ """
+ target_raid_config = node.target_raid_config
+ logical_disks = target_raid_config.get('logical_disks', [])
+ software_raid = False
+ for logical_disk in logical_disks:
+ if logical_disk.get('controller') == 'software':
+ software_raid = True
+ break
+ return software_raid
+
+
def build_agent_options(node):
"""Build the options to be passed to the agent ramdisk.
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index c3d6e0104..73a257586 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -73,7 +73,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.30: Add owner field
# Version 1.31: Add allocation_id field
# Version 1.32: Add description field
- VERSION = '1.32'
+ # Version 1.33: Add retired and retired_reason fields
+ VERSION = '1.33'
dbapi = db_api.get_instance()
@@ -159,6 +160,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
'traits': object_fields.ObjectField('TraitList', nullable=True),
'owner': object_fields.StringField(nullable=True),
'description': object_fields.StringField(nullable=True),
+ 'retired': objects.fields.BooleanField(nullable=True),
+ 'retired_reason': object_fields.StringField(nullable=True),
}
def as_dict(self, secure=False):
@@ -595,6 +598,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
should be set to None (or removed).
Version 1.32: description was added. For versions prior to this, it
should be set to None (or removed).
+ Version 1.33: retired was added. For versions prior to this, it
+ should be set to False (or removed).
:param target_version: the desired version of the object
:param remove_unavailable_fields: True to remove fields that are
@@ -608,7 +613,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
fields = [('rescue_interface', 22), ('traits', 23),
('bios_interface', 24), ('fault', 25),
('automated_clean', 28), ('protected_reason', 29),
- ('owner', 30), ('allocation_id', 31), ('description', 32)]
+ ('owner', 30), ('allocation_id', 31), ('description', 32),
+ ('retired_reason', 33)]
for name, minor in fields:
self._adjust_field_to_version(name, None, target_version,
1, minor, remove_unavailable_fields)
@@ -622,6 +628,9 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
self._convert_conductor_group_field(target_version,
remove_unavailable_fields)
+ self._adjust_field_to_version('retired', False, target_version,
+ 1, 33, remove_unavailable_fields)
+
@base.IronicObjectRegistry.register
class NodePayload(notification.NotificationPayloadBase):
@@ -671,6 +680,8 @@ class NodePayload(notification.NotificationPayloadBase):
'provision_state': ('node', 'provision_state'),
'provision_updated_at': ('node', 'provision_updated_at'),
'resource_class': ('node', 'resource_class'),
+ 'retired': ('node', 'retired'),
+ 'retired_reason': ('node', 'retired_reason'),
'target_power_state': ('node', 'target_power_state'),
'target_provision_state': ('node', 'target_provision_state'),
'updated_at': ('node', 'updated_at'),
@@ -692,7 +703,8 @@ class NodePayload(notification.NotificationPayloadBase):
# Version 1.11: Add protected and protected_reason fields exposed via API.
# Version 1.12: Add node owner field.
# Version 1.13: Add description field.
- VERSION = '1.13'
+ # Version 1.14: Add retired and retired_reason fields exposed via API.
+ VERSION = '1.14'
fields = {
'clean_step': object_fields.FlexibleDictField(nullable=True),
'conductor_group': object_fields.StringField(nullable=True),
@@ -730,6 +742,8 @@ class NodePayload(notification.NotificationPayloadBase):
'provision_state': object_fields.StringField(nullable=True),
'provision_updated_at': object_fields.DateTimeField(nullable=True),
'resource_class': object_fields.StringField(nullable=True),
+ 'retired': object_fields.BooleanField(nullable=True),
+ 'retired_reason': object_fields.StringField(nullable=True),
'target_power_state': object_fields.StringField(nullable=True),
'target_provision_state': object_fields.StringField(nullable=True),
'traits': object_fields.ListOfStringsField(nullable=True),
@@ -776,7 +790,8 @@ class NodeSetPowerStatePayload(NodePayload):
# Version 1.11: Parent NodePayload version 1.11
# Version 1.12: Parent NodePayload version 1.12
# Version 1.13: Parent NodePayload version 1.13
- VERSION = '1.13'
+ # Version 1.14: Parent NodePayload version 1.14
+ VERSION = '1.14'
fields = {
# "to_power" indicates the future target_power_state of the node. A
@@ -830,7 +845,8 @@ class NodeCorrectedPowerStatePayload(NodePayload):
# Version 1.11: Parent NodePayload version 1.11
# Version 1.12: Parent NodePayload version 1.12
# Version 1.13: Parent NodePayload version 1.13
- VERSION = '1.13'
+ # Version 1.14: Parent NodePayload version 1.14
+ VERSION = '1.14'
fields = {
'from_power': object_fields.StringField(nullable=True)
@@ -868,7 +884,8 @@ class NodeSetProvisionStatePayload(NodePayload):
# Version 1.11: Parent NodePayload version 1.11
# Version 1.12: Parent NodePayload version 1.12
# Version 1.13: Parent NodePayload version 1.13
- VERSION = '1.13'
+ # Version 1.14: Parent NodePayload version 1.14
+ VERSION = '1.14'
SCHEMA = dict(NodePayload.SCHEMA,
**{'instance_info': ('node', 'instance_info')})
@@ -913,7 +930,8 @@ class NodeCRUDPayload(NodePayload):
# Version 1.9: Parent NodePayload version 1.11
# Version 1.10: Parent NodePayload version 1.12
# Version 1.11: Parent NodePayload version 1.13
- VERSION = '1.11'
+ # Version 1.12: Parent NodePayload version 1.14
+ VERSION = '1.12'
SCHEMA = dict(NodePayload.SCHEMA,
**{'instance_info': ('node', 'instance_info'),
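Aside: a stand-in sketch (not the ironic implementation) of the pinning rule documented above for objects converted to versions older than 1.33:

    def pin_retired_fields(node_dict, target_minor_version):
        # Older consumers do not know about the new fields, so reset them.
        if target_minor_version < 33:
            node_dict['retired'] = False
            node_dict['retired_reason'] = None
        return node_dict

    pin_retired_fields({'retired': True,
                        'retired_reason': 'warranty expired'}, 32)
    # -> {'retired': False, 'retired_reason': None}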
diff --git a/ironic/objects/port.py b/ironic/objects/port.py
index 7bc829dfa..6c75c8c21 100644
--- a/ironic/objects/port.py
+++ b/ironic/objects/port.py
@@ -203,17 +203,18 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
- def get_by_address(cls, context, address):
+ def get_by_address(cls, context, address, owner=None):
"""Find a port based on address and return a :class:`Port` object.
:param cls: the :class:`Port`
:param context: Security context
:param address: the address of a port.
+ :param owner: a node owner to match against
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
- db_port = cls.dbapi.get_port_by_address(address)
+ db_port = cls.dbapi.get_port_by_address(address, owner=owner)
port = cls._from_db_object(context, cls(), db_port)
return port
@@ -223,7 +224,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, owner=None):
"""Return a list of Port objects.
:param context: Security context.
@@ -231,6 +232,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
+ :param owner: a node owner to match against
:returns: a list of :class:`Port` object.
:raises: InvalidParameterValue
@@ -238,7 +240,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
db_ports = cls.dbapi.get_port_list(limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ owner=owner)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -247,7 +250,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, owner=None):
"""Return a list of Port objects associated with a given node ID.
:param context: Security context.
@@ -256,13 +259,15 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
+ :param owner: a node owner to match against
:returns: a list of :class:`Port` object.
"""
db_ports = cls.dbapi.get_ports_by_node_id(node_id, limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ owner=owner)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -271,7 +276,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list_by_portgroup_id(cls, context, portgroup_id, limit=None,
- marker=None, sort_key=None, sort_dir=None):
+ marker=None, sort_key=None, sort_dir=None,
+ owner=None):
"""Return a list of Port objects associated with a given portgroup ID.
:param context: Security context.
@@ -280,6 +286,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
+ :param owner: a node owner to match against
:returns: a list of :class:`Port` object.
"""
@@ -287,7 +294,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ owner=owner)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
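Aside: a hedged usage sketch of the owner parameter at the object layer ('context' is assumed to be an existing request context, and the MAC address is arbitrary):

    from ironic import objects

    def ports_visible_to_owner(context, owner):
        # Both listing and address lookup now accept the node-owner scope.
        ports = objects.Port.list(context, limit=50, owner=owner)
        port = objects.Port.get_by_address(context, '52:54:00:cf:2d:31',
                                           owner=owner)
        return ports, port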
diff --git a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
index 96fd16ced..f45ec138d 100644
--- a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
+++ b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
@@ -360,7 +360,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
- self.assertIn(error_msg, response.json['error_message'])
+ self.assertRegex(response.json['error_message'], error_msg)
self.assertFalse(mock_save.called)
return response
@@ -538,7 +538,8 @@ class TestPatch(BaseDeployTemplatesAPITest):
}
patch = [{'path': '/steps/1', 'op': 'replace', 'value': step}]
self._test_update_bad_request(
- mock_save, patch, "list assignment index out of range")
+ mock_save, patch, "list assignment index out of range|"
+ "can't replace outside of list")
def test_replace_empty_step_list_fail(self, mock_save):
patch = [{'path': '/steps', 'op': 'replace', 'value': []}]
@@ -654,7 +655,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
def test_add_root_non_existent(self, mock_save):
patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
self._test_update_bad_request(
- mock_save, patch, "Adding a new attribute (/foo)")
+ mock_save, patch, "Adding a new attribute \(/foo\)")
def test_add_too_high_index_step_fail(self, mock_save):
step = {
diff --git a/ironic/tests/unit/api/controllers/v1/test_expose.py b/ironic/tests/unit/api/controllers/v1/test_expose.py
index eee02036d..b68a2b2bf 100644
--- a/ironic/tests/unit/api/controllers/v1/test_expose.py
+++ b/ironic/tests/unit/api/controllers/v1/test_expose.py
@@ -54,6 +54,8 @@ class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
('api_utils.check_node_policy_and_retrieve' in src) or
('api_utils.check_node_list_policy' in src) or
('self._get_node_and_topic' in src) or
+ ('api_utils.check_port_policy_and_retrieve' in src) or
+ ('api_utils.check_port_list_policy' in src) or
('policy.authorize' in src and
'context.to_policy_values' in src),
'no policy check found in in exposed '
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index f1474f225..7bf9e89d6 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -133,6 +133,8 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertNotIn('protected', data['nodes'][0])
self.assertNotIn('protected_reason', data['nodes'][0])
self.assertNotIn('owner', data['nodes'][0])
+ self.assertNotIn('retired', data['nodes'][0])
+ self.assertNotIn('retired_reason', data['nodes'][0])
def test_get_one(self):
node = obj_utils.create_test_node(self.context,
@@ -353,6 +355,33 @@ class TestListNodes(test_api_base.BaseApiTest):
headers={api_base.Version.string: '1.51'})
self.assertIsNone(data['description'])
+ def test_node_retired_hidden_in_lower_version(self):
+ self._test_node_field_hidden_in_lower_version('retired',
+ '1.60', '1.61')
+
+ def test_node_retired_reason_hidden_in_lower_version(self):
+ self._test_node_field_hidden_in_lower_version('retired_reason',
+ '1.60', '1.61')
+
+ def test_node_retired(self):
+ for value in (True, False):
+ node = obj_utils.create_test_node(self.context, retired=value,
+ provision_state='active',
+ uuid=uuidutils.generate_uuid())
+ data = self.get_json('/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: '1.61'})
+ self.assertIs(data['retired'], value)
+ self.assertIsNone(data['retired_reason'])
+
+ def test_node_retired_with_reason(self):
+ node = obj_utils.create_test_node(self.context, retired=True,
+ provision_state='active',
+ retired_reason='warranty expired')
+ data = self.get_json('/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: '1.61'})
+ self.assertTrue(data['retired'])
+ self.assertEqual('warranty expired', data['retired_reason'])
+
def test_get_one_custom_fields(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
@@ -568,6 +597,14 @@ class TestListNodes(test_api_base.BaseApiTest):
headers={api_base.Version.string: '1.52'})
self.assertEqual(allocation.uuid, response['allocation_uuid'])
+ def test_get_retired_fields(self):
+ node = obj_utils.create_test_node(self.context,
+ retired=True)
+ response = self.get_json('/nodes/%s?fields=%s' %
+ (node.uuid, 'retired'),
+ headers={api_base.Version.string: '1.61'})
+ self.assertIn('retired', response)
+
def test_detail(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
@@ -606,6 +643,8 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertNotIn('chassis_id', data['nodes'][0])
self.assertNotIn('allocation_id', data['nodes'][0])
self.assertIn('allocation_uuid', data['nodes'][0])
+ self.assertIn('retired', data['nodes'][0])
+ self.assertIn('retired_reason', data['nodes'][0])
def test_detail_using_query(self):
node = obj_utils.create_test_node(self.context,
@@ -641,6 +680,8 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertIn(field, data['nodes'][0])
# never expose the chassis_id
self.assertNotIn('chassis_id', data['nodes'][0])
+ self.assertIn('retired', data['nodes'][0])
+ self.assertIn('retired_reason', data['nodes'][0])
def test_detail_query_false(self):
obj_utils.create_test_node(self.context)
@@ -3290,6 +3331,66 @@ class TestPatch(test_api_base.BaseApiTest):
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
+ def test_update_retired(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state='active')
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.61'}
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/retired',
+ 'value': True,
+ 'op': 'replace'}],
+ headers=headers)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.OK, response.status_code)
+
+ def test_update_retired_with_reason(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state='active')
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.61'}
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/retired',
+ 'value': True,
+ 'op': 'replace'},
+ {'path': '/retired_reason',
+ 'value': 'a better reason',
+ 'op': 'replace'}],
+ headers=headers)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.OK, response.status_code)
+
+ def test_update_retired_reason(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state='active',
+ retired=True)
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.61'}
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/retired_reason',
+ 'value': 'a better reason',
+ 'op': 'replace'}],
+ headers=headers)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.OK, response.status_code)
+
+ def test_update_retired_old_api(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid())
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.60'}
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/retired',
+ 'value': True,
+ 'op': 'replace'}],
+ headers=headers,
+ expect_errors=True)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
+
def _create_node_locally(node):
driver_factory.check_and_update_node_interfaces(node)
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 51a84209b..9870c2e04 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -32,6 +32,7 @@ from ironic.api.controllers.v1 import port as api_port
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import versions
from ironic.common import exception
+from ironic.common import policy
from ironic.common import states
from ironic.common import utils as common_utils
from ironic.conductor import rpcapi
@@ -189,7 +190,7 @@ class TestListPorts(test_api_base.BaseApiTest):
def setUp(self):
super(TestListPorts, self).setUp()
- self.node = obj_utils.create_test_node(self.context)
+ self.node = obj_utils.create_test_node(self.context, owner='12345')
def test_empty(self):
data = self.get_json('/ports')
@@ -250,6 +251,42 @@ class TestListPorts(test_api_base.BaseApiTest):
self.assertEqual(port.uuid, data['ports'][0]["uuid"])
self.assertIsNone(data['ports'][0]["portgroup_uuid"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_list_non_admin_forbidden(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ raise exception.HTTPForbidden(resource='fake')
+ mock_authorize.side_effect = mock_authorize_function
+
+ address_template = "aa:bb:cc:dd:ee:f%d"
+ for id_ in range(3):
+ obj_utils.create_test_port(self.context,
+ node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address=address_template % id_)
+
+ response = self.get_json('/ports',
+ headers={'X-Project-Id': '12345'},
+ expect_errors=True)
+ self.assertEqual(http_client.FORBIDDEN, response.status_int)
+
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_list_non_admin_forbidden_no_project(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+
+ address_template = "aa:bb:cc:dd:ee:f%d"
+ for id_ in range(3):
+ obj_utils.create_test_port(self.context,
+ node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address=address_template % id_)
+
+ response = self.get_json('/ports', expect_errors=True)
+ self.assertEqual(http_client.FORBIDDEN, response.status_int)
+
def test_get_one(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
data = self.get_json('/ports/%s' % port.uuid)
@@ -581,6 +618,33 @@ class TestListPorts(test_api_base.BaseApiTest):
uuids = [n['uuid'] for n in data['ports']]
self.assertCountEqual(ports, uuids)
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_many_non_admin(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+
+ ports = []
+ # these ports should be retrieved by the API call
+ for id_ in range(0, 2):
+ port = obj_utils.create_test_port(
+ self.context, node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % id_)
+ ports.append(port.uuid)
+ # these ports should NOT be retrieved by the API call
+ for id_ in range(3, 5):
+ port = obj_utils.create_test_port(
+ self.context, uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % id_)
+ data = self.get_json('/ports', headers={'X-Project-Id': '12345'})
+ self.assertEqual(len(ports), len(data['ports']))
+
+ uuids = [n['uuid'] for n in data['ports']]
+ self.assertCountEqual(ports, uuids)
+
def _test_links(self, public_url=None):
cfg.CONF.set_override('public_endpoint', public_url, 'api')
uuid = uuidutils.generate_uuid()
@@ -686,6 +750,47 @@ class TestListPorts(test_api_base.BaseApiTest):
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_address, response.json['error_message'])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_port_by_address_non_admin(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+
+ address_template = "aa:bb:cc:dd:ee:f%d"
+ for id_ in range(3):
+ obj_utils.create_test_port(self.context,
+ node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address=address_template % id_)
+
+ target_address = address_template % 1
+ data = self.get_json('/ports?address=%s' % target_address,
+ headers={'X-Project-Id': '12345'})
+ self.assertThat(data['ports'], matchers.HasLength(1))
+ self.assertEqual(target_address, data['ports'][0]['address'])
+
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_port_by_address_non_admin_no_match(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+
+ address_template = "aa:bb:cc:dd:ee:f%d"
+ for id_ in range(3):
+ obj_utils.create_test_port(self.context,
+ node_id=self.node.id,
+ uuid=uuidutils.generate_uuid(),
+ address=address_template % id_)
+
+ target_address = address_template % 1
+ data = self.get_json('/ports?address=%s' % target_address,
+ headers={'X-Project-Id': '54321'})
+ self.assertThat(data['ports'], matchers.HasLength(0))
+
def test_sort_key(self):
ports = []
for id_ in range(3):
@@ -765,6 +870,60 @@ class TestListPorts(test_api_base.BaseApiTest):
headers={api_base.Version.string: '1.5'})
self.assertEqual(3, len(data['ports']))
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_get_all_by_node_name_non_admin(
+ self, mock_get_rpc_node, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+ mock_get_rpc_node.return_value = self.node
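+        # The owning project ('12345') should see the node's three ports;
+        # the companion test below checks that another project sees none.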
+
+ for i in range(5):
+ if i < 3:
+ node_id = self.node.id
+ else:
+ node_id = 100000 + i
+ obj_utils.create_test_port(self.context,
+ node_id=node_id,
+ uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % i)
+ data = self.get_json("/ports?node=%s" % 'test-node',
+ headers={
+ api_base.Version.string: '1.5',
+ 'X-Project-Id': '12345'
+ })
+ self.assertEqual(3, len(data['ports']))
+
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(api_utils, 'get_rpc_node')
+ def test_get_all_by_node_name_non_admin_no_match(
+ self, mock_get_rpc_node, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+ mock_get_rpc_node.return_value = self.node
+
+ for i in range(5):
+ if i < 3:
+ node_id = self.node.id
+ else:
+ node_id = 100000 + i
+ obj_utils.create_test_port(self.context,
+ node_id=node_id,
+ uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:3%s' % i)
+ data = self.get_json("/ports?node=%s" % 'test-node',
+ headers={
+ api_base.Version.string: '1.5',
+ 'X-Project-Id': '54321'
+ })
+ self.assertEqual(0, len(data['ports']))
+
@mock.patch.object(api_utils, 'get_rpc_node')
def test_get_all_by_node_uuid_and_name(self, mock_get_rpc_node):
# GET /v1/ports specifying node and uuid - should only use node_uuid
@@ -832,6 +991,48 @@ class TestListPorts(test_api_base.BaseApiTest):
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_get_all_by_portgroup_uuid_non_admin(self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
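+        # Portgroup filtering is also owner-scoped when list_all is denied.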
+
+ pg = obj_utils.create_test_portgroup(self.context,
+ node_id=self.node.id)
+ port = obj_utils.create_test_port(self.context, node_id=self.node.id,
+ portgroup_id=pg.id)
+ data = self.get_json('/ports/detail?portgroup=%s' % pg.uuid,
+ headers={
+ api_base.Version.string: '1.24',
+ 'X-Project-Id': '12345'
+ })
+
+ self.assertEqual(port.uuid, data['ports'][0]['uuid'])
+ self.assertEqual(pg.uuid,
+ data['ports'][0]['portgroup_uuid'])
+
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_get_all_by_portgroup_uuid_non_admin_no_match(
+ self, mock_authorize):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+
+ pg = obj_utils.create_test_portgroup(self.context)
+ obj_utils.create_test_port(self.context, node_id=self.node.id,
+ portgroup_id=pg.id)
+ data = self.get_json('/ports/detail?portgroup=%s' % pg.uuid,
+ headers={
+ api_base.Version.string: '1.24',
+ 'X-Project-Id': '54321'
+ })
+
+ self.assertThat(data['ports'], matchers.HasLength(0))
+
def test_get_all_by_portgroup_name(self):
pg = obj_utils.create_test_portgroup(self.context,
node_id=self.node.id)
diff --git a/ironic/tests/unit/api/controllers/v1/test_utils.py b/ironic/tests/unit/api/controllers/v1/test_utils.py
index 80f74936d..68e8a7f47 100644
--- a/ironic/tests/unit/api/controllers/v1/test_utils.py
+++ b/ironic/tests/unit/api/controllers/v1/test_utils.py
@@ -114,6 +114,7 @@ class TestApiUtils(base.TestCase):
doc = []
patch = [{"op": "replace", "path": "/0", "value": 42}]
self.assertRaisesRegex(exception.PatchError,
+ "can't replace outside of list|"
"list assignment index out of range",
utils.apply_jsonpatch, doc, patch)
@@ -1030,3 +1031,159 @@ class TestCheckNodeListPolicy(base.TestCase):
utils.check_node_list_policy,
'54321'
)
+
+
+class TestCheckPortPolicyAndRetrieve(base.TestCase):
+ def setUp(self):
+ super(TestCheckPortPolicyAndRetrieve, self).setUp()
+ self.valid_port_uuid = uuidutils.generate_uuid()
+ self.node = test_api_utils.post_get_test_node()
+ self.node['owner'] = '12345'
+ self.port = objects.Port(self.context, node_id=42)
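+        # node_id=42 lets the tests assert the owner lookup goes through
+        # objects.Node.get_by_id with that id.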
+
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(objects.Port, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_id')
+ def test_check_port_policy_and_retrieve(
+ self, mock_ngbi, mock_pgbu, mock_authorize, mock_pr
+ ):
+ mock_pr.version.minor = 50
+ mock_pr.context.to_policy_values.return_value = {}
+ mock_pgbu.return_value = self.port
+ mock_ngbi.return_value = self.node
+
+ rpc_port, rpc_node = utils.check_port_policy_and_retrieve(
+ 'fake_policy', self.valid_port_uuid
+ )
+ mock_pgbu.assert_called_once_with(mock_pr.context,
+ self.valid_port_uuid)
+ mock_ngbi.assert_called_once_with(mock_pr.context, 42)
+ mock_authorize.assert_called_once_with(
+ 'fake_policy', {'node.owner': '12345'}, {})
+ self.assertEqual(self.port, rpc_port)
+ self.assertEqual(self.node, rpc_node)
+
+ @mock.patch.object(api, 'request', spec_set=["context"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(objects.Port, 'get_by_uuid')
+ def test_check_port_policy_and_retrieve_no_port_policy_forbidden(
+ self, mock_pgbu, mock_authorize, mock_pr
+ ):
+ mock_pr.context.to_policy_values.return_value = {}
+ mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
+ mock_pgbu.side_effect = exception.PortNotFound(
+ port=self.valid_port_uuid)
+
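+        # A failing policy check masks the missing port: HTTPForbidden is
+        # raised instead of PortNotFound.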
+ self.assertRaises(
+ exception.HTTPForbidden,
+ utils.check_port_policy_and_retrieve,
+ 'fake-policy',
+ self.valid_port_uuid
+ )
+
+ @mock.patch.object(api, 'request', spec_set=["context"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(objects.Port, 'get_by_uuid')
+ def test_check_port_policy_and_retrieve_no_port(
+ self, mock_pgbu, mock_authorize, mock_pr
+ ):
+ mock_pr.context.to_policy_values.return_value = {}
+ mock_pgbu.side_effect = exception.PortNotFound(
+ port=self.valid_port_uuid)
+
+ self.assertRaises(
+ exception.PortNotFound,
+ utils.check_port_policy_and_retrieve,
+ 'fake-policy',
+ self.valid_port_uuid
+ )
+
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ @mock.patch.object(objects.Port, 'get_by_uuid')
+ @mock.patch.object(objects.Node, 'get_by_id')
+ def test_check_port_policy_and_retrieve_policy_forbidden(
+ self, mock_ngbi, mock_pgbu, mock_authorize, mock_pr
+ ):
+ mock_pr.version.minor = 50
+ mock_pr.context.to_policy_values.return_value = {}
+ mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
+ mock_pgbu.return_value = self.port
+ mock_ngbi.return_value = self.node
+
+ self.assertRaises(
+ exception.HTTPForbidden,
+ utils.check_port_policy_and_retrieve,
+ 'fake-policy',
+ self.valid_port_uuid
+ )
+
+
+class TestCheckPortListPolicy(base.TestCase):
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_check_port_list_policy(
+ self, mock_authorize, mock_pr
+ ):
+ mock_pr.context.to_policy_values.return_value = {
+ 'project_id': '12345'
+ }
+ mock_pr.version.minor = 50
+
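+        # 'baremetal:port:list_all' is allowed, so no owner filter applies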
+ owner = utils.check_port_list_policy()
+ self.assertIsNone(owner)
+
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_check_port_list_policy_forbidden(
+ self, mock_authorize, mock_pr
+ ):
+ def mock_authorize_function(rule, target, creds):
+ raise exception.HTTPForbidden(resource='fake')
+ mock_authorize.side_effect = mock_authorize_function
+ mock_pr.context.to_policy_values.return_value = {
+ 'project_id': '12345'
+ }
+ mock_pr.version.minor = 50
+
+ self.assertRaises(
+ exception.HTTPForbidden,
+ utils.check_port_list_policy,
+ )
+
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_check_port_list_policy_forbidden_no_project(
+ self, mock_authorize, mock_pr
+ ):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+ mock_pr.context.to_policy_values.return_value = {}
+ mock_pr.version.minor = 50
+
+ self.assertRaises(
+ exception.HTTPForbidden,
+ utils.check_port_list_policy,
+ )
+
+ @mock.patch.object(api, 'request', spec_set=["context", "version"])
+ @mock.patch.object(policy, 'authorize', spec=True)
+ def test_check_port_list_policy_non_admin(
+ self, mock_authorize, mock_pr
+ ):
+ def mock_authorize_function(rule, target, creds):
+ if rule == 'baremetal:port:list_all':
+ raise exception.HTTPForbidden(resource='fake')
+ return True
+ mock_authorize.side_effect = mock_authorize_function
+ mock_pr.context.to_policy_values.return_value = {
+ 'project_id': '12345'
+ }
+ mock_pr.version.minor = 50
+
+ owner = utils.check_port_list_policy()
+        self.assertEqual('12345', owner)
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
new file mode 100644
index 000000000..f48fee047
--- /dev/null
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -0,0 +1,975 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for cleaning bits."""
+
+import mock
+from oslo_config import cfg
+from oslo_utils import uuidutils
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import cleaning
+from ironic.conductor import steps as conductor_steps
+from ironic.conductor import task_manager
+from ironic.drivers.modules import fake
+from ironic.drivers.modules.network import flat as n_flat
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as obj_utils
+
+CONF = cfg.CONF
+
+
+class DoNodeCleanTestCase(db_base.DbTestCase):
+ def setUp(self):
+ super(DoNodeCleanTestCase, self).setUp()
+ self.config(automated_clean=True, group='conductor')
+ self.power_update = {
+ 'step': 'update_firmware', 'priority': 10, 'interface': 'power'}
+ self.deploy_update = {
+ 'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'}
+ self.deploy_erase = {
+ 'step': 'erase_disks', 'priority': 20, 'interface': 'deploy'}
+ # Automated cleaning should be executed in this order
+ self.clean_steps = [self.deploy_erase, self.power_update,
+ self.deploy_update]
+ self.next_clean_step_index = 1
+ # Manual clean step
+ self.deploy_raid = {
+ 'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
+
+ def __do_node_clean_validate_fail(self, mock_validate, clean_steps=None):
+ # InvalidParameterValue should cause node to go to CLEANFAIL
+ mock_validate.side_effect = exception.InvalidParameterValue('error')
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ node.refresh()
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ def test__do_node_clean_automated_power_validate_fail(self, mock_validate):
+ self.__do_node_clean_validate_fail(mock_validate)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ def test__do_node_clean_manual_power_validate_fail(self, mock_validate):
+ self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_automated_network_validate_fail(self,
+ mock_validate):
+ self.__do_node_clean_validate_fail(mock_validate)
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_manual_network_validate_fail(self, mock_validate):
+ self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+
+ @mock.patch.object(cleaning, 'LOG', autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
+ autospec=True)
+ @mock.patch.object(cleaning, 'do_next_clean_step', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeBIOS.cache_bios_settings',
+ autospec=True)
+ def _test__do_node_clean_cache_bios(self, mock_bios, mock_validate,
+ mock_prep, mock_next_step, mock_steps,
+ mock_log, clean_steps=None,
+ enable_unsupported=False,
+ enable_exception=False):
+ if enable_unsupported:
+ mock_bios.side_effect = exception.UnsupportedDriverExtension('')
+ elif enable_exception:
+ mock_bios.side_effect = exception.IronicException('test')
+ mock_prep.return_value = states.NOSTATE
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ node.refresh()
+ mock_bios.assert_called_once_with(mock.ANY, task)
+ if clean_steps:
+ self.assertEqual(states.CLEANING, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ else:
+ self.assertEqual(states.CLEANING, node.provision_state)
+ self.assertEqual(states.AVAILABLE, node.target_provision_state)
+ mock_validate.assert_called_once_with(mock.ANY, task)
+ if enable_exception:
+ mock_log.exception.assert_called_once_with(
+ 'Caching of bios settings failed on node {}. '
+ 'Continuing with node cleaning.'
+ .format(node.uuid))
+
+ def test__do_node_clean_manual_cache_bios(self):
+ self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid])
+
+ def test__do_node_clean_automated_cache_bios(self):
+ self._test__do_node_clean_cache_bios()
+
+ def test__do_node_clean_manual_cache_bios_exception(self):
+ self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid],
+ enable_exception=True)
+
+ def test__do_node_clean_automated_cache_bios_exception(self):
+ self._test__do_node_clean_cache_bios(enable_exception=True)
+
+ def test__do_node_clean_manual_cache_bios_unsupported(self):
+ self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid],
+ enable_unsupported=True)
+
+ def test__do_node_clean_automated_cache_bios_unsupported(self):
+ self._test__do_node_clean_cache_bios(enable_unsupported=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ def test__do_node_clean_automated_disabled(self, mock_validate):
+ self.config(automated_clean=False, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node was moved to available without cleaning
+ self.assertFalse(mock_validate.called)
+ self.assertEqual(states.AVAILABLE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_steps', node.driver_internal_info)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_automated_disabled_individual_enabled(
+ self, mock_network, mock_validate):
+ self.config(automated_clean=False, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None, automated_clean=True)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node clean was called
+ self.assertTrue(mock_validate.called)
+ self.assertIn('clean_steps', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ def test__do_node_clean_automated_disabled_individual_disabled(
+ self, mock_validate):
+ self.config(automated_clean=False, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None, automated_clean=False)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node was moved to available without cleaning
+ self.assertFalse(mock_validate.called)
+ self.assertEqual(states.AVAILABLE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_steps', node.driver_internal_info)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_automated_enabled(self, mock_validate,
+ mock_network):
+ self.config(automated_clean=True, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None,
+ driver_internal_info={'agent_url': 'url'})
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node was cleaned
+ self.assertTrue(mock_validate.called)
+ self.assertIn('clean_steps', node.driver_internal_info)
+ self.assertNotIn('agent_url', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_automated_enabled_individual_enabled(
+ self, mock_network, mock_validate):
+ self.config(automated_clean=True, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None, automated_clean=True)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node was cleaned
+ self.assertTrue(mock_validate.called)
+ self.assertIn('clean_steps', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_automated_enabled_individual_none(
+ self, mock_validate, mock_network):
+ self.config(automated_clean=True, group='conductor')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ last_error=None, automated_clean=None)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+
+ # Assert that the node was cleaned
+ self.assertTrue(mock_validate.called)
+ self.assertIn('clean_steps', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down_cleaning',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
+ autospec=True)
+ def test__do_node_clean_maintenance(self, mock_prep, mock_tear_down):
+ CONF.set_override('allow_provisioning_in_maintenance', False,
+ group='conductor')
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=states.AVAILABLE,
+ maintenance=True,
+ maintenance_reason='Original reason')
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task)
+ node.refresh()
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(states.AVAILABLE, node.target_provision_state)
+ self.assertIn('is not allowed', node.last_error)
+ self.assertTrue(node.maintenance)
+ self.assertEqual('Original reason', node.maintenance_reason)
+ self.assertFalse(mock_prep.called)
+ self.assertFalse(mock_tear_down.called)
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
+ autospec=True)
+ def __do_node_clean_prepare_clean_fail(self, mock_prep, mock_validate,
+ clean_steps=None):
+ # Exception from task.driver.deploy.prepare_cleaning should cause node
+ # to go to CLEANFAIL
+ mock_prep.side_effect = exception.InvalidParameterValue('error')
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ node.refresh()
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ mock_prep.assert_called_once_with(mock.ANY, task)
+ mock_validate.assert_called_once_with(mock.ANY, task)
+
+ def test__do_node_clean_automated_prepare_clean_fail(self):
+ self.__do_node_clean_prepare_clean_fail()
+
+ def test__do_node_clean_manual_prepare_clean_fail(self):
+ self.__do_node_clean_prepare_clean_fail(clean_steps=[self.deploy_raid])
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
+ autospec=True)
+ def __do_node_clean_prepare_clean_wait(self, mock_prep, mock_validate,
+ clean_steps=None):
+ mock_prep.return_value = states.CLEANWAIT
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ node.refresh()
+ self.assertEqual(states.CLEANWAIT, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ mock_prep.assert_called_once_with(mock.ANY, mock.ANY)
+ mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
+
+ def test__do_node_clean_automated_prepare_clean_wait(self):
+ self.__do_node_clean_prepare_clean_wait()
+
+ def test__do_node_clean_manual_prepare_clean_wait(self):
+ self.__do_node_clean_prepare_clean_wait(clean_steps=[self.deploy_raid])
+
+ @mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
+ autospec=True)
+ def __do_node_clean_steps_fail(self, mock_steps, mock_validate,
+ clean_steps=None, invalid_exc=True):
+ if invalid_exc:
+ mock_steps.side_effect = exception.InvalidParameterValue('invalid')
+ else:
+ mock_steps.side_effect = exception.NodeCleaningFailure('failure')
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ uuid=uuidutils.generate_uuid(),
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ mock_validate.assert_called_once_with(mock.ANY, task)
+ node.refresh()
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ mock_steps.assert_called_once_with(mock.ANY)
+
+ def test__do_node_clean_automated_steps_fail(self):
+ for invalid in (True, False):
+ self.__do_node_clean_steps_fail(invalid_exc=invalid)
+
+ def test__do_node_clean_manual_steps_fail(self):
+ for invalid in (True, False):
+ self.__do_node_clean_steps_fail(clean_steps=[self.deploy_raid],
+ invalid_exc=invalid)
+
+ @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
+ autospec=True)
+ @mock.patch.object(cleaning, 'do_next_clean_step', autospec=True)
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ def __do_node_clean(self, mock_power_valid, mock_network_valid,
+ mock_next_step, mock_steps, clean_steps=None):
+ if clean_steps:
+ tgt_prov_state = states.MANAGEABLE
+ driver_info = {}
+ else:
+ tgt_prov_state = states.AVAILABLE
+ driver_info = {'clean_steps': self.clean_steps}
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ power_state=states.POWER_OFF,
+ driver_internal_info=driver_info)
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+
+ node.refresh()
+
+ mock_power_valid.assert_called_once_with(mock.ANY, task)
+ mock_network_valid.assert_called_once_with(mock.ANY, task)
+ mock_next_step.assert_called_once_with(task, 0)
+ mock_steps.assert_called_once_with(task)
+ if clean_steps:
+ self.assertEqual(clean_steps,
+ node.driver_internal_info['clean_steps'])
+
+ # Check that state didn't change
+ self.assertEqual(states.CLEANING, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+
+ def test__do_node_clean_automated(self):
+ self.__do_node_clean()
+
+ def test__do_node_clean_manual(self):
+ self.__do_node_clean(clean_steps=[self.deploy_raid])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_first_step_async(self, return_state, mock_execute,
+ clean_steps=None):
+ # Execute the first async clean step on a node
+ driver_internal_info = {'clean_step_index': None}
+ if clean_steps:
+ tgt_prov_state = states.MANAGEABLE
+ driver_internal_info['clean_steps'] = clean_steps
+ else:
+ tgt_prov_state = states.AVAILABLE
+ driver_internal_info['clean_steps'] = self.clean_steps
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info=driver_internal_info,
+ clean_step={})
+ mock_execute.return_value = return_state
+ expected_first_step = node.driver_internal_info['clean_steps'][0]
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ self.assertEqual(states.CLEANWAIT, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual(expected_first_step, node.clean_step)
+ self.assertEqual(0, node.driver_internal_info['clean_step_index'])
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, expected_first_step)
+
+ def test_do_next_clean_step_automated_first_step_async(self):
+ self._do_next_clean_step_first_step_async(states.CLEANWAIT)
+
+ def test_do_next_clean_step_manual_first_step_async(self):
+ self._do_next_clean_step_first_step_async(
+ states.CLEANWAIT, clean_steps=[self.deploy_raid])
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_continue_from_last_cleaning(self, return_state,
+ mock_execute,
+ manual=False):
+ # Resume an in-progress cleaning after the first async step
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': 0},
+ clean_step=self.clean_steps[0])
+ mock_execute.return_value = return_state
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, self.next_clean_step_index)
+
+ node.refresh()
+
+ self.assertEqual(states.CLEANWAIT, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual(self.clean_steps[1], node.clean_step)
+ self.assertEqual(1, node.driver_internal_info['clean_step_index'])
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.clean_steps[1])
+
+ def test_do_next_clean_step_continue_from_last_cleaning(self):
+ self._do_next_clean_step_continue_from_last_cleaning(states.CLEANWAIT)
+
+ def test_do_next_clean_step_manual_continue_from_last_cleaning(self):
+ self._do_next_clean_step_continue_from_last_cleaning(states.CLEANWAIT,
+ manual=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_last_step_noop(self, mock_execute, manual=False,
+ retired=False):
+ # Resume where last_step is the last cleaning step, should be noop
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+ info = {'clean_steps': self.clean_steps,
+ 'clean_step_index': len(self.clean_steps) - 1}
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info=info,
+ clean_step=self.clean_steps[-1],
+ retired=retired)
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, None)
+
+ node.refresh()
+
+ # retired nodes move to manageable upon cleaning
+ if retired:
+ tgt_prov_state = states.MANAGEABLE
+
+ # Cleaning should be complete without calling additional steps
+ self.assertEqual(tgt_prov_state, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['clean_steps'])
+ self.assertFalse(mock_execute.called)
+
+ def test__do_next_clean_step_automated_last_step_noop(self):
+ self._do_next_clean_step_last_step_noop()
+
+ def test__do_next_clean_step_manual_last_step_noop(self):
+ self._do_next_clean_step_last_step_noop(manual=True)
+
+ def test__do_next_clean_step_retired_last_step_change_tgt_state(self):
+ self._do_next_clean_step_last_step_noop(retired=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_all(self, mock_deploy_execute,
+ mock_power_execute, manual=False):
+ # Run all steps from start to finish (all synchronous)
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+
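+        # fake_deploy mutates driver_internal_info so the test can verify
+        # that changes made inside a step survive the cleaning loop.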
+ def fake_deploy(conductor_obj, task, step):
+ driver_internal_info = task.node.driver_internal_info
+ driver_internal_info['goober'] = 'test'
+ task.node.driver_internal_info = driver_internal_info
+ task.node.save()
+
+ mock_deploy_execute.side_effect = fake_deploy
+ mock_power_execute.return_value = None
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ # Cleaning should be complete
+ self.assertEqual(tgt_prov_state, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertEqual('test', node.driver_internal_info['goober'])
+ self.assertIsNone(node.driver_internal_info['clean_steps'])
+ mock_power_execute.assert_called_once_with(mock.ANY, mock.ANY,
+ self.clean_steps[1])
+ mock_deploy_execute.assert_has_calls(
+ [mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
+ mock.call(mock.ANY, mock.ANY, self.clean_steps[2])])
+
+ def test_do_next_clean_step_automated_all(self):
+ self._do_next_clean_step_all()
+
+ def test_do_next_clean_step_manual_all(self):
+ self._do_next_clean_step_all(manual=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
+ def _do_next_clean_step_execute_fail(self, tear_mock, mock_execute,
+ manual=False):
+ # When a clean step fails, go to CLEANFAIL
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+ mock_execute.side_effect = Exception()
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+ tear_mock.assert_called_once_with(task.driver.deploy, task)
+
+ node.refresh()
+
+ # Make sure we go to CLEANFAIL, clear clean_steps
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertTrue(node.maintenance)
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.clean_steps[0])
+
+ def test__do_next_clean_step_automated_execute_fail(self):
+ self._do_next_clean_step_execute_fail()
+
+ def test__do_next_clean_step_manual_execute_fail(self):
+ self._do_next_clean_step_execute_fail(manual=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def test_do_next_clean_step_oob_reboot(self, mock_execute):
+        # The step fails with AgentConnectionFailed while a cleaning reboot
+        # is pending, so the node should go to CLEANWAIT
+ tgt_prov_state = states.MANAGEABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None,
+ 'cleaning_reboot': True},
+ clean_step={})
+ mock_execute.side_effect = exception.AgentConnectionFailed(
+ reason='failed')
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ # Make sure we go to CLEANWAIT
+ self.assertEqual(states.CLEANWAIT, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual(self.clean_steps[0], node.clean_step)
+ self.assertEqual(0, node.driver_internal_info['clean_step_index'])
+ self.assertFalse(node.driver_internal_info['skip_current_clean_step'])
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.clean_steps[0])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def test_do_next_clean_step_oob_reboot_last_step(self, mock_execute):
+ # Resume where last_step is the last cleaning step
+ tgt_prov_state = states.MANAGEABLE
+ info = {'clean_steps': self.clean_steps,
+ 'cleaning_reboot': True,
+ 'clean_step_index': len(self.clean_steps) - 1}
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info=info,
+ clean_step=self.clean_steps[-1])
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, None)
+
+ node.refresh()
+
+ # Cleaning should be complete without calling additional steps
+ self.assertEqual(tgt_prov_state, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['clean_steps'])
+ self.assertFalse(mock_execute.called)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
+ def test_do_next_clean_step_oob_reboot_fail(self, tear_mock,
+ mock_execute):
+        # When a clean step fails with no reboot requested, go to CLEANFAIL
+ tgt_prov_state = states.MANAGEABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+ mock_execute.side_effect = exception.AgentConnectionFailed(
+ reason='failed')
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+ tear_mock.assert_called_once_with(task.driver.deploy, task)
+
+ node.refresh()
+
+ # Make sure we go to CLEANFAIL, clear clean_steps
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertTrue(node.maintenance)
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.clean_steps[0])
+
+ @mock.patch.object(cleaning, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
+ autospec=True)
+ @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
+ def _do_next_clean_step_fail_in_tear_down_cleaning(
+ self, tear_mock, power_exec_mock, deploy_exec_mock, log_mock,
+            manual=False):
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+
+ deploy_exec_mock.return_value = None
+ power_exec_mock.return_value = None
+ tear_mock.side_effect = Exception('boom')
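+        # Every step succeeds, but tear_down_cleaning raises afterwards;
+        # the node should still end up in CLEANFAIL and maintenance.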
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ # Make sure we go to CLEANFAIL, clear clean_steps
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertEqual(1, tear_mock.call_count)
+ self.assertTrue(node.maintenance)
+ deploy_exec_calls = [
+ mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
+ mock.call(mock.ANY, mock.ANY, self.clean_steps[2]),
+ ]
+ self.assertEqual(deploy_exec_calls, deploy_exec_mock.call_args_list)
+
+ power_exec_calls = [
+ mock.call(mock.ANY, mock.ANY, self.clean_steps[1]),
+ ]
+ self.assertEqual(power_exec_calls, power_exec_mock.call_args_list)
+ log_mock.exception.assert_called_once_with(
+ 'Failed to tear down from cleaning for node {}, reason: boom'
+ .format(node.uuid))
+
+ def test__do_next_clean_step_automated_fail_in_tear_down_cleaning(self):
+ self._do_next_clean_step_fail_in_tear_down_cleaning()
+
+ def test__do_next_clean_step_manual_fail_in_tear_down_cleaning(self):
+ self._do_next_clean_step_fail_in_tear_down_cleaning(manual=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_no_steps(self, mock_execute, manual=False,
+ fast_track=False):
+ if fast_track:
+ self.config(fast_track=True, group='deploy')
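+            # Fast track must keep agent_url in driver_internal_info;
+            # otherwise it is removed (both cases asserted below).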
+
+ for info in ({'clean_steps': None, 'clean_step_index': None,
+ 'agent_url': 'test-url'},
+ {'clean_steps': None, 'agent_url': 'test-url'}):
+ # Resume where there are no steps, should be a noop
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ uuid=uuidutils.generate_uuid(),
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info=info,
+ clean_step={})
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, None)
+
+ node.refresh()
+
+ # Cleaning should be complete without calling additional steps
+ self.assertEqual(tgt_prov_state, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertFalse(mock_execute.called)
+ if fast_track:
+ self.assertEqual('test-url',
+ node.driver_internal_info.get('agent_url'))
+ else:
+ self.assertNotIn('agent_url', node.driver_internal_info)
+ mock_execute.reset_mock()
+
+ def test__do_next_clean_step_automated_no_steps(self):
+ self._do_next_clean_step_no_steps()
+
+ def test__do_next_clean_step_manual_no_steps(self):
+ self._do_next_clean_step_no_steps(manual=True)
+
+ def test__do_next_clean_step_fast_track(self):
+ self._do_next_clean_step_no_steps(fast_track=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def _do_next_clean_step_bad_step_return_value(
+ self, deploy_exec_mock, power_exec_mock, manual=False):
+        # When a clean step returns an invalid value, go to CLEANFAIL
+ tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+ deploy_exec_mock.return_value = "foo"
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ # Make sure we go to CLEANFAIL, clear clean_steps
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertTrue(node.maintenance)
+ deploy_exec_mock.assert_called_once_with(mock.ANY, mock.ANY,
+ self.clean_steps[0])
+ # Make sure we don't execute any other step and return
+ self.assertFalse(power_exec_mock.called)
+
+ def test__do_next_clean_step_automated_bad_step_return_value(self):
+ self._do_next_clean_step_bad_step_return_value()
+
+ def test__do_next_clean_step_manual_bad_step_return_value(self):
+ self._do_next_clean_step_bad_step_return_value(manual=True)
+
+
+class DoNodeCleanAbortTestCase(db_base.DbTestCase):
+ @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
+ def _test__do_node_clean_abort(self, step_name, tear_mock):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANFAIL,
+ target_provision_state=states.AVAILABLE,
+ clean_step={'step': 'foo', 'abortable': True},
+ driver_internal_info={
+ 'clean_step_index': 2,
+ 'cleaning_reboot': True,
+ 'cleaning_polling': True,
+ 'skip_current_clean_step': True})
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ cleaning.do_node_clean_abort(task, step_name=step_name)
+ self.assertIsNotNone(task.node.last_error)
+ tear_mock.assert_called_once_with(task.driver.deploy, task)
+ if step_name:
+ self.assertIn(step_name, task.node.last_error)
+ # assert node's clean_step and metadata was cleaned up
+ self.assertEqual({}, task.node.clean_step)
+ self.assertNotIn('clean_step_index',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_polling',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ def test__do_node_clean_abort(self):
+ self._test__do_node_clean_abort(None)
+
+ def test__do_node_clean_abort_with_step_name(self):
+ self._test__do_node_clean_abort('foo')
+
+ @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
+ def test__do_node_clean_abort_tear_down_fail(self, tear_mock):
+ tear_mock.side_effect = Exception('Surprise')
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANFAIL,
+ target_provision_state=states.AVAILABLE,
+ clean_step={'step': 'foo', 'abortable': True})
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ cleaning.do_node_clean_abort(task)
+ tear_mock.assert_called_once_with(task.driver.deploy, task)
+ self.assertIsNotNone(task.node.last_error)
+ self.assertIsNotNone(task.node.maintenance_reason)
+ self.assertTrue(task.node.maintenance)
+ self.assertEqual('clean failure', task.node.fault)
diff --git a/ironic/tests/unit/conductor/test_deployments.py b/ironic/tests/unit/conductor/test_deployments.py
new file mode 100644
index 000000000..ba8b327fe
--- /dev/null
+++ b/ironic/tests/unit/conductor/test_deployments.py
@@ -0,0 +1,1017 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for deployment aspects of the conductor."""
+
+import mock
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_utils import uuidutils
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.common import swift
+from ironic.conductor import deployments
+from ironic.conductor import steps as conductor_steps
+from ironic.conductor import task_manager
+from ironic.conductor import utils as conductor_utils
+from ironic.db import api as dbapi
+from ironic.drivers.modules import fake
+from ironic.tests.unit.conductor import mgr_utils
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as obj_utils
+
+CONF = cfg.CONF
+
+
+@mgr_utils.mock_record_keepalive
+class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ def test__do_node_deploy_driver_raises_prepare_error(self, mock_prepare,
+ mock_deploy):
+ self._start_service()
+ # test when driver.deploy.prepare raises an ironic error
+ mock_prepare.side_effect = exception.InstanceDeployFailure('test')
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaises(exception.InstanceDeployFailure,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ # NOTE(deva): failing a deploy does not clear the target state
+ # any longer. Instead, it is cleared when the instance
+ # is deleted.
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ self.assertTrue(mock_prepare.called)
+ self.assertFalse(mock_deploy.called)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ def test__do_node_deploy_unexpected_prepare_error(self, mock_prepare,
+ mock_deploy):
+ self._start_service()
+ # test when driver.deploy.prepare raises an exception
+ mock_prepare.side_effect = RuntimeError('test')
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaises(RuntimeError,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ # NOTE(deva): failing a deploy does not clear the target state
+ # any longer. Instead, it is cleared when the instance
+ # is deleted.
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ self.assertTrue(mock_prepare.called)
+ self.assertFalse(mock_deploy.called)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ def test__do_node_deploy_driver_raises_error_old(self, mock_deploy):
+ # TODO(rloo): delete this after the deprecation period for supporting
+ # non deploy_steps.
+ # Mocking FakeDeploy.deploy before starting the service, causes
+ # it not to be a deploy_step.
+ self._start_service()
+ # test when driver.deploy.deploy raises an ironic error
+ mock_deploy.side_effect = exception.InstanceDeployFailure('test')
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaises(exception.InstanceDeployFailure,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ # NOTE(deva): failing a deploy does not clear the target state
+ # any longer. Instead, it is cleared when the instance
+ # is deleted.
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ def test__do_node_deploy_driver_unexpected_exception_old(self,
+ mock_deploy):
+ # TODO(rloo): delete this after the deprecation period for supporting
+ # non deploy_steps.
+ # Mocking FakeDeploy.deploy before starting the service, causes
+ # it not to be a deploy_step.
+ self._start_service()
+ # test when driver.deploy.deploy raises an exception
+ mock_deploy.side_effect = RuntimeError('test')
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaises(RuntimeError,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ # NOTE(deva): failing a deploy does not clear the target state
+ # any longer. Instead, it is cleared when the instance
+ # is deleted.
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY)
+
+ def _test__do_node_deploy_driver_exception(self, exc, unexpected=False):
+ self._start_service()
+ with mock.patch.object(fake.FakeDeploy,
+ 'deploy', autospec=True) as mock_deploy:
+ # test when driver.deploy.deploy() raises an exception
+ mock_deploy.side_effect = exc
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ # NOTE(deva): failing a deploy does not clear the target state
+ # any longer. Instead, it is cleared when the instance
+ # is deleted.
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ if unexpected:
+ self.assertIn('Exception', node.last_error)
+ else:
+ self.assertNotIn('Exception', node.last_error)
+
+ mock_deploy.assert_called_once_with(mock.ANY, task)
+
+ def test__do_node_deploy_driver_ironic_exception(self):
+ self._test__do_node_deploy_driver_exception(
+ exception.InstanceDeployFailure('test'))
+
+ def test__do_node_deploy_driver_unexpected_exception(self):
+ self._test__do_node_deploy_driver_exception(RuntimeError('test'),
+ unexpected=True)
+
+ @mock.patch.object(deployments, '_store_configdrive', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ def test__do_node_deploy_ok_old(self, mock_deploy, mock_store):
+ # TODO(rloo): delete this after the deprecation period for supporting
+ # non deploy_steps.
+ # Mocking FakeDeploy.deploy before starting the service, causes
+ # it not to be a deploy_step.
+ self._start_service()
+ # test when driver.deploy.deploy returns DEPLOYDONE
+ mock_deploy.return_value = states.DEPLOYDONE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY)
+ # assert _store_configdrive wasn't invoked
+ self.assertFalse(mock_store.called)
+
+ @mock.patch.object(deployments, '_store_configdrive', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ def test__do_node_deploy_ok_configdrive_old(self, mock_deploy, mock_store):
+ # TODO(rloo): delete this after the deprecation period for supporting
+ # drivers without deploy_steps.
+ # Mocking FakeDeploy.deploy before starting the service causes
+ # it not to be a deploy_step.
+ self._start_service()
+ # test when driver.deploy.deploy returns DEPLOYDONE
+ mock_deploy.return_value = states.DEPLOYDONE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ configdrive = 'foo'
+
+ deployments.do_node_deploy(task, self.service.conductor.id,
+ configdrive=configdrive)
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY)
+ mock_store.assert_called_once_with(task.node, configdrive)
+
+ @mock.patch.object(deployments, '_store_configdrive', autospec=True)
+ def _test__do_node_deploy_ok(self, mock_store, configdrive=None,
+ expected_configdrive=None):
+ expected_configdrive = expected_configdrive or configdrive
+ self._start_service()
+ with mock.patch.object(fake.FakeDeploy,
+ 'deploy', autospec=True) as mock_deploy:
+ mock_deploy.return_value = None
+ self.node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware', name=None,
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, self.node.uuid)
+
+ deployments.do_node_deploy(task, self.service.conductor.id,
+ configdrive=configdrive)
+ self.node.refresh()
+ self.assertEqual(states.ACTIVE, self.node.provision_state)
+ self.assertEqual(states.NOSTATE, self.node.target_provision_state)
+ self.assertIsNone(self.node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
+ if configdrive:
+ mock_store.assert_called_once_with(task.node,
+ expected_configdrive)
+ else:
+ self.assertFalse(mock_store.called)
+
+ def test__do_node_deploy_ok(self):
+ self._test__do_node_deploy_ok()
+
+ def test__do_node_deploy_ok_configdrive(self):
+ configdrive = 'foo'
+ self._test__do_node_deploy_ok(configdrive=configdrive)
+
+ @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_configdrive_as_dict(self, mock_cd):
+ mock_cd.return_value = 'foo'
+ configdrive = {'user_data': 'abcd'}
+ self._test__do_node_deploy_ok(configdrive=configdrive,
+ expected_configdrive='foo')
+ mock_cd.assert_called_once_with({'uuid': self.node.uuid},
+ network_data=None,
+ user_data=b'abcd',
+ vendor_data=None)
+
+ @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_configdrive_as_dict_with_meta_data(self, mock_cd):
+ mock_cd.return_value = 'foo'
+ configdrive = {'meta_data': {'uuid': uuidutils.generate_uuid(),
+ 'name': 'new-name',
+ 'hostname': 'example.com'}}
+ self._test__do_node_deploy_ok(configdrive=configdrive,
+ expected_configdrive='foo')
+ mock_cd.assert_called_once_with(configdrive['meta_data'],
+ network_data=None,
+ user_data=None,
+ vendor_data=None)
+
+ @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_configdrive_with_network_data(self, mock_cd):
+ mock_cd.return_value = 'foo'
+ configdrive = {'network_data': {'links': []}}
+ self._test__do_node_deploy_ok(configdrive=configdrive,
+ expected_configdrive='foo')
+ mock_cd.assert_called_once_with({'uuid': self.node.uuid},
+ network_data={'links': []},
+ user_data=None,
+ vendor_data=None)
+
+ @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_configdrive_and_user_data_as_dict(self, mock_cd):
+ mock_cd.return_value = 'foo'
+ configdrive = {'user_data': {'user': 'data'}}
+ self._test__do_node_deploy_ok(configdrive=configdrive,
+ expected_configdrive='foo')
+ mock_cd.assert_called_once_with({'uuid': self.node.uuid},
+ network_data=None,
+ user_data=b'{"user": "data"}',
+ vendor_data=None)
+
+ @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_configdrive_with_vendor_data(self, mock_cd):
+ mock_cd.return_value = 'foo'
+ configdrive = {'vendor_data': {'foo': 'bar'}}
+ self._test__do_node_deploy_ok(configdrive=configdrive,
+ expected_configdrive='foo')
+ mock_cd.assert_called_once_with({'uuid': self.node.uuid},
+ network_data=None,
+ user_data=None,
+ vendor_data={'foo': 'bar'})
+
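The configdrive-as-dict tests above mock openstacksdk's builder. As a hedged reference only (not part of this patch), the call they assert on looks roughly like the sketch below, assuming the openstack.baremetal.configdrive.build signature shown in the assertions; the UUID and payload values are illustrative.

from openstack.baremetal import configdrive

def build_example_configdrive(node_uuid):
    # Renders meta_data, network_data, user_data and vendor_data into a
    # gzipped, base64-encoded config drive image string, mirroring the
    # keyword arguments asserted in the tests above.
    return configdrive.build(
        {'uuid': node_uuid},            # meta_data defaults to the node UUID
        network_data={'links': []},
        user_data=b'{"user": "data"}',
        vendor_data=None)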
+ @mock.patch.object(swift, 'SwiftAPI')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ def test__do_node_deploy_configdrive_swift_error(self, mock_prepare,
+ mock_swift):
+ CONF.set_override('configdrive_use_object_store', True,
+ group='deploy')
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ mock_swift.side_effect = exception.SwiftOperationError('error')
+ self.assertRaises(exception.SwiftOperationError,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id,
+ configdrive=b'fake config drive')
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ self.assertFalse(mock_prepare.called)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ def test__do_node_deploy_configdrive_db_error(self, mock_prepare):
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.node.save()
+ expected_instance_info = dict(node.instance_info)
+ with mock.patch.object(dbapi.IMPL, 'update_node') as mock_db:
+ db_node = self.dbapi.get_node_by_uuid(node.uuid)
+ mock_db.side_effect = [db_exception.DBDataError('DB error'),
+ db_node, db_node, db_node]
+ self.assertRaises(db_exception.DBDataError,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id,
+ configdrive=b'fake config drive')
+ expected_instance_info.update(configdrive=b'fake config drive')
+ expected_calls = [
+ mock.call(node.uuid,
+ {'version': mock.ANY,
+ 'instance_info': expected_instance_info}),
+ mock.call(node.uuid,
+ {'version': mock.ANY,
+ 'last_error': mock.ANY}),
+ mock.call(node.uuid,
+ {'version': mock.ANY,
+ 'deploy_step': {},
+ 'driver_internal_info': mock.ANY}),
+ mock.call(node.uuid,
+ {'version': mock.ANY,
+ 'provision_state': states.DEPLOYFAIL,
+ 'target_provision_state': states.ACTIVE}),
+ ]
+ self.assertEqual(expected_calls, mock_db.mock_calls)
+ self.assertFalse(mock_prepare.called)
+
+ @mock.patch.object(deployments, '_store_configdrive', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ def test__do_node_deploy_configdrive_unexpected_error(self, mock_prepare,
+ mock_store):
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ mock_store.side_effect = RuntimeError('unexpected')
+ self.assertRaises(RuntimeError,
+ deployments.do_node_deploy, task,
+ self.service.conductor.id,
+ configdrive=b'fake config drive')
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ self.assertFalse(mock_prepare.called)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
+ def test__do_node_deploy_ok_2_old(self, mock_deploy):
+ # TODO(rloo): delete this after the deprecation period for supporting
+ # drivers without deploy_steps.
+ # Mocking FakeDeploy.deploy before starting the service causes
+ # it not to be a deploy_step.
+ # NOTE(rloo): a different way of testing for the same thing as in
+ # test__do_node_deploy_ok()
+ self._start_service()
+ # test when driver.deploy.deploy returns DEPLOYDONE
+ mock_deploy.return_value = states.DEPLOYDONE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY)
+
+ def test__do_node_deploy_ok_2(self):
+ # NOTE(rloo): a different way of testing for the same thing as in
+ # test__do_node_deploy_ok(). Instead of specifying the provision &
+ # target_provision_states when creating the node, we call
+ # task.process_event() to "set the stage" (err "states").
+ self._start_service()
+ with mock.patch.object(fake.FakeDeploy,
+ 'deploy', autospec=True) as mock_deploy:
+ # test when driver.deploy.deploy() returns None
+ mock_deploy.return_value = None
+ node = obj_utils.create_test_node(self.context,
+ driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
+
+ @mock.patch.object(deployments, 'do_next_deploy_step', autospec=True)
+ @mock.patch.object(deployments, '_old_rest_of_do_node_deploy',
+ autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
+ autospec=True)
+ def test_do_node_deploy_deprecated(self, mock_set_steps, mock_old_way,
+ mock_deploy_step):
+ # TODO(rloo): no deploy steps; delete this when we remove support
+ # for handling no deploy steps.
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ mock_set_steps.assert_called_once_with(task)
+ mock_old_way.assert_called_once_with(task, self.service.conductor.id,
+ True)
+ self.assertFalse(mock_deploy_step.called)
+ self.assertNotIn('deploy_steps', task.node.driver_internal_info)
+
+ @mock.patch.object(deployments, 'do_next_deploy_step', autospec=True)
+ @mock.patch.object(deployments, '_old_rest_of_do_node_deploy',
+ autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
+ autospec=True)
+ def test_do_node_deploy_steps(self, mock_set_steps, mock_old_way,
+ mock_deploy_step):
+ # these are not real steps...
+ fake_deploy_steps = ['step1', 'step2']
+
+ def add_steps(task):
+ info = task.node.driver_internal_info
+ info['deploy_steps'] = fake_deploy_steps
+ task.node.driver_internal_info = info
+ task.node.save()
+
+ mock_set_steps.side_effect = add_steps
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ mock_set_steps.assert_called_once_with(task)
+ self.assertFalse(mock_old_way.called)
+ self.assertTrue(mock_deploy_step.called)
+ self.assertEqual(fake_deploy_steps,
+ task.node.driver_internal_info['deploy_steps'])
+
+ @mock.patch.object(deployments, 'do_next_deploy_step', autospec=True)
+ @mock.patch.object(deployments, '_old_rest_of_do_node_deploy',
+ autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
+ autospec=True)
+ def test_do_node_deploy_steps_old_rpc(self, mock_set_steps, mock_old_way,
+ mock_deploy_step):
+ # TODO(rloo): old RPC; delete this when we remove support for drivers
+ # with no deploy steps.
+ CONF.set_override('pin_release_version', '11.0')
+ # these are not real steps...
+ fake_deploy_steps = ['step1', 'step2']
+
+ def add_steps(task):
+ info = task.node.driver_internal_info
+ info['deploy_steps'] = fake_deploy_steps
+ task.node.driver_internal_info = info
+ task.node.save()
+
+ mock_set_steps.side_effect = add_steps
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_node_deploy(task, self.service.conductor.id)
+ mock_set_steps.assert_called_once_with(task)
+ mock_old_way.assert_called_once_with(task, self.service.conductor.id,
+ False)
+ self.assertFalse(mock_deploy_step.called)
+ self.assertNotIn('deploy_steps', task.node.driver_internal_info)
+
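The three do_node_deploy dispatch tests above pin down which path is taken. A minimal sketch of that decision, under the assumption that it depends only on the presence of deploy steps and on whether the pinned RPC version supports them (the function and argument names below are illustrative, not the real deployments code):

def pick_deploy_path(has_deploy_steps, rpc_supports_steps):
    # Mirrors what the tests assert: steps plus a new-enough RPC go through
    # do_next_deploy_step; otherwise the deprecated path is used.
    if has_deploy_steps and rpc_supports_steps:
        return 'do_next_deploy_step'
    return '_old_rest_of_do_node_deploy'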
+ @mock.patch.object(deployments, '_SEEN_NO_DEPLOY_STEP_DEPRECATIONS',
+ autospec=True)
+ @mock.patch.object(deployments, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
+ def test__old_rest_of_do_node_deploy_no_steps(self, mock_deploy, mock_log,
+ mock_deprecate):
+ # TODO(rloo): no deploy steps; delete this when we remove support
+ # for handling no deploy steps.
+ mock_deprecate.__contains__.side_effect = [False, True]
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments._old_rest_of_do_node_deploy(
+ task, self.service.conductor.id, True)
+ mock_deploy.assert_called_once_with(mock.ANY, task)
+ self.assertTrue(mock_log.warning.called)
+ self.assertEqual(self.service.conductor.id,
+ task.node.conductor_affinity)
+ mock_deprecate.__contains__.assert_called_once_with('FakeDeploy')
+ mock_deprecate.add.assert_called_once_with('FakeDeploy')
+
+ # Make sure the deprecation warning isn't logged again
+ mock_log.reset_mock()
+ mock_deprecate.add.reset_mock()
+ deployments._old_rest_of_do_node_deploy(
+ task, self.service.conductor.id, True)
+ self.assertFalse(mock_log.warning.called)
+ mock_deprecate.__contains__.assert_has_calls(
+ [mock.call('FakeDeploy'), mock.call('FakeDeploy')])
+ self.assertFalse(mock_deprecate.add.called)
+
+ @mock.patch.object(deployments, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
+ def test__old_rest_of_do_node_deploy_has_steps(self, mock_deploy,
+ mock_log):
+ # TODO(rloo): has steps but old RPC; delete this when we remove support
+ # for handling no deploy steps.
+ deployments._SEEN_NO_DEPLOY_STEP_DEPRECATIONS = set()
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments._old_rest_of_do_node_deploy(
+ task, self.service.conductor.id, False)
+ mock_deploy.assert_called_once_with(mock.ANY, task)
+ self.assertFalse(mock_log.warning.called)
+ self.assertEqual(self.service.conductor.id,
+ task.node.conductor_affinity)
+
+ @mock.patch('ironic.conductor.deployments._start_console_in_deploy',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
+ def test__old_rest_of_do_node_deploy_console(self, mock_deploy,
+ mock_console):
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+ mock_deploy.return_value = states.DEPLOYDONE
+
+ deployments._old_rest_of_do_node_deploy(
+ task, self.service.conductor.id, True)
+ mock_deploy.assert_called_once_with(mock.ANY, task)
+ mock_console.assert_called_once_with(task)
+ self.assertEqual(self.service.conductor.id,
+ task.node.conductor_affinity)
+
+
+@mgr_utils.mock_record_keepalive
+class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
+ db_base.DbTestCase):
+ def setUp(self):
+ super(DoNextDeployStepTestCase, self).setUp()
+ self.deploy_start = {
+ 'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'}
+ self.deploy_end = {
+ 'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'}
+ self.deploy_steps = [self.deploy_start, self.deploy_end]
+
+ @mock.patch.object(deployments, 'LOG', autospec=True)
+ def test__do_next_deploy_step_none(self, mock_log):
+ self._start_service()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, None, self.service.conductor.id)
+
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(2, mock_log.info.call_count)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test__do_next_deploy_step_async(self, mock_execute):
+ driver_internal_info = {'deploy_step_index': None,
+ 'deploy_steps': self.deploy_steps}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info=driver_internal_info,
+ deploy_step={})
+ mock_execute.return_value = states.DEPLOYWAIT
+ expected_first_step = node.driver_internal_info['deploy_steps'][0]
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 0, self.service.conductor.id)
+
+ node.refresh()
+ self.assertEqual(states.DEPLOYWAIT, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertEqual(expected_first_step, node.deploy_step)
+ self.assertEqual(0, node.driver_internal_info['deploy_step_index'])
+ self.assertEqual(self.service.conductor.id, node.conductor_affinity)
+ mock_execute.assert_called_once_with(mock.ANY, task,
+ self.deploy_steps[0])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test__do_next_deploy_step_continue_from_last_step(self, mock_execute):
+ # Resume an in-progress deploy after the first async step
+ driver_internal_info = {'deploy_step_index': 0,
+ 'deploy_steps': self.deploy_steps}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYWAIT,
+ target_provision_state=states.ACTIVE,
+ driver_internal_info=driver_internal_info,
+ deploy_step=self.deploy_steps[0])
+ mock_execute.return_value = states.DEPLOYWAIT
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('resume')
+
+ deployments.do_next_deploy_step(task, 1, self.service.conductor.id)
+ node.refresh()
+
+ self.assertEqual(states.DEPLOYWAIT, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertEqual(self.deploy_steps[1], node.deploy_step)
+ self.assertEqual(1, node.driver_internal_info['deploy_step_index'])
+ mock_execute.assert_called_once_with(mock.ANY, task,
+ self.deploy_steps[1])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def _test__do_next_deploy_step_last_step_done(self, mock_execute,
+ mock_console,
+ console_enabled=False,
+ console_error=False):
+ # Resume where last_step is the last deploy step that was executed
+ driver_internal_info = {'deploy_step_index': 1,
+ 'deploy_steps': self.deploy_steps}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYWAIT,
+ target_provision_state=states.ACTIVE,
+ driver_internal_info=driver_internal_info,
+ deploy_step=self.deploy_steps[1],
+ console_enabled=console_enabled)
+ mock_execute.return_value = None
+ if console_error:
+ mock_console.side_effect = exception.ConsoleError()
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('resume')
+
+ deployments.do_next_deploy_step(task, None, self.service.conductor.id)
+ node.refresh()
+ # Deploying should be complete without calling additional steps
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['deploy_steps'])
+ self.assertFalse(mock_execute.called)
+ if console_enabled:
+ mock_console.assert_called_once_with(mock.ANY, task)
+ else:
+ self.assertFalse(mock_console.called)
+
+ def test__do_next_deploy_step_last_step_done(self):
+ self._test__do_next_deploy_step_last_step_done()
+
+ def test__do_next_deploy_step_last_step_done_with_console(self):
+ self._test__do_next_deploy_step_last_step_done(console_enabled=True)
+
+ def test__do_next_deploy_step_last_step_done_with_console_error(self):
+ self._test__do_next_deploy_step_last_step_done(console_enabled=True,
+ console_error=True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test__do_next_deploy_step_all(self, mock_execute):
+ # Run all steps from start to finish (all synchronous)
+ driver_internal_info = {'deploy_step_index': None,
+ 'deploy_steps': self.deploy_steps,
+ 'agent_url': 'url'}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info=driver_internal_info,
+ deploy_step={})
+ mock_execute.return_value = None
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 1, self.service.conductor.id)
+
+ # Deploying should be complete
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['deploy_steps'])
+ mock_execute.assert_has_calls = [mock.call(self.deploy_steps[0]),
+ mock.call(self.deploy_steps[1])]
+ self.assertNotIn('agent_url', node.driver_internal_info)
+
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def _do_next_deploy_step_execute_fail(self, exc, traceback,
+ mock_execute, mock_log):
+ # When a deploy step fails, go to DEPLOYFAIL
+ driver_internal_info = {'deploy_step_index': None,
+ 'deploy_steps': self.deploy_steps}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info=driver_internal_info,
+ deploy_step={})
+ mock_execute.side_effect = exc
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 0, self.service.conductor.id)
+
+ # Make sure we go to DEPLOYFAIL, clear deploy_steps
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertFalse(node.maintenance)
+ mock_execute.assert_called_once_with(mock.ANY, mock.ANY,
+ self.deploy_steps[0])
+ mock_log.error.assert_called_once_with(mock.ANY, exc_info=traceback)
+
+ def test_do_next_deploy_step_execute_ironic_exception(self):
+ self._do_next_deploy_step_execute_fail(
+ exception.IronicException('foo'), False)
+
+ def test_do_next_deploy_step_execute_exception(self):
+ self._do_next_deploy_step_execute_fail(Exception('foo'), True)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test_do_next_deploy_step_no_steps(self, mock_execute):
+
+ self._start_service()
+ for info in ({'deploy_steps': None, 'deploy_step_index': None},
+ {'deploy_steps': None}):
+ # Resume where there are no steps, should be a noop
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ uuid=uuidutils.generate_uuid(),
+ last_error=None,
+ driver_internal_info=info,
+ deploy_step={})
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, None,
+ self.service.conductor.id)
+
+ # Deploying should be complete without calling additional steps
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertFalse(mock_execute.called)
+ mock_execute.reset_mock()
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test_do_next_deploy_step_bad_step_return_value(self, mock_execute):
+ # When a deploy step returns an invalid value, go to DEPLOYFAIL
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info={'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': None},
+ deploy_step={})
+ mock_execute.return_value = "foo"
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 0, self.service.conductor.id)
+
+ # Make sure we go to DEPLOYFAIL, clear deploy_steps
+ node.refresh()
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ self.assertFalse(node.maintenance)
+ mock_execute.assert_called_once_with(mock.ANY, mock.ANY,
+ self.deploy_steps[0])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test_do_next_deploy_step_oob_reboot(self, mock_execute):
+ # When the agent connection fails during a requested reboot, go to DEPLOYWAIT
+ tgt_prov_state = states.ACTIVE
+
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': None,
+ 'deployment_reboot': True},
+ deploy_step={})
+ mock_execute.side_effect = exception.AgentConnectionFailed(
+ reason='failed')
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ deployments.do_next_deploy_step(task, 0, mock.ANY)
+
+ self._stop_service()
+ node.refresh()
+
+ # Make sure we go to DEPLOYWAIT
+ self.assertEqual(states.DEPLOYWAIT, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual(self.deploy_steps[0], node.deploy_step)
+ self.assertEqual(0, node.driver_internal_info['deploy_step_index'])
+ self.assertFalse(node.driver_internal_info['skip_current_deploy_step'])
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.deploy_steps[0])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test_do_next_deploy_step_oob_reboot_fail(self, mock_execute):
+ # When a deploy step fails with no reboot requested, go to DEPLOYFAIL
+ tgt_prov_state = states.ACTIVE
+
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': None},
+ deploy_step={})
+ mock_execute.side_effect = exception.AgentConnectionFailed(
+ reason='failed')
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ deployments.do_next_deploy_step(task, 0, mock.ANY)
+
+ self._stop_service()
+ node.refresh()
+
+ # Make sure we go to DEPLOYFAIL, clear deploy_steps
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertNotIn('skip_current_deploy_step', node.driver_internal_info)
+ self.assertIsNotNone(node.last_error)
+ mock_execute.assert_called_once_with(
+ mock.ANY, mock.ANY, self.deploy_steps[0])
+
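The DoNextDeployStepTestCase tests above rely on a simple contract for what execute_deploy_step returns. A hedged summary of that contract as exercised here; the dispatcher below is a sketch, not the real deployments.do_next_deploy_step:

from ironic.common import states

def interpret_step_result(result):
    # DEPLOYWAIT means the step is asynchronous and the conductor should wait;
    # None means the step finished and the next one can run; anything else is
    # treated as an invalid return value and the deploy fails.
    if result == states.DEPLOYWAIT:
        return 'wait'
    if result is None:
        return 'next'
    return 'fail'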
+
+@mock.patch.object(swift, 'SwiftAPI')
+class StoreConfigDriveTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(StoreConfigDriveTestCase, self).setUp()
+ self.node = obj_utils.create_test_node(self.context,
+ driver='fake-hardware',
+ instance_info=None)
+
+ def test_store_configdrive(self, mock_swift):
+ deployments._store_configdrive(self.node, 'foo')
+ expected_instance_info = {'configdrive': 'foo'}
+ self.node.refresh()
+ self.assertEqual(expected_instance_info, self.node.instance_info)
+ self.assertFalse(mock_swift.called)
+
+ def test_store_configdrive_swift(self, mock_swift):
+ container_name = 'foo_container'
+ timeout = 123
+ expected_obj_name = 'configdrive-%s' % self.node.uuid
+ expected_obj_header = {'X-Delete-After': str(timeout)}
+ expected_instance_info = {'configdrive': 'http://1.2.3.4'}
+
+ # set configs and mocks
+ CONF.set_override('configdrive_use_object_store', True,
+ group='deploy')
+ CONF.set_override('configdrive_swift_container', container_name,
+ group='conductor')
+ CONF.set_override('deploy_callback_timeout', timeout,
+ group='conductor')
+ mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
+
+ deployments._store_configdrive(self.node, b'foo')
+
+ mock_swift.assert_called_once_with()
+ mock_swift.return_value.create_object.assert_called_once_with(
+ container_name, expected_obj_name, mock.ANY,
+ object_headers=expected_obj_header)
+ mock_swift.return_value.get_temp_url.assert_called_once_with(
+ container_name, expected_obj_name, timeout)
+ self.node.refresh()
+ self.assertEqual(expected_instance_info, self.node.instance_info)
+
+ def test_store_configdrive_swift_no_deploy_timeout(self, mock_swift):
+ container_name = 'foo_container'
+ expected_obj_name = 'configdrive-%s' % self.node.uuid
+ expected_obj_header = {'X-Delete-After': '1200'}
+ expected_instance_info = {'configdrive': 'http://1.2.3.4'}
+
+ # set configs and mocks
+ CONF.set_override('configdrive_use_object_store', True,
+ group='deploy')
+ CONF.set_override('configdrive_swift_container', container_name,
+ group='conductor')
+ CONF.set_override('configdrive_swift_temp_url_duration', 1200,
+ group='conductor')
+ CONF.set_override('deploy_callback_timeout', 0,
+ group='conductor')
+ mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
+
+ deployments._store_configdrive(self.node, b'foo')
+
+ mock_swift.assert_called_once_with()
+ mock_swift.return_value.create_object.assert_called_once_with(
+ container_name, expected_obj_name, mock.ANY,
+ object_headers=expected_obj_header)
+ mock_swift.return_value.get_temp_url.assert_called_once_with(
+ container_name, expected_obj_name, 1200)
+ self.node.refresh()
+ self.assertEqual(expected_instance_info, self.node.instance_info)
+
+ def test_store_configdrive_swift_no_deploy_timeout_fallback(self,
+ mock_swift):
+ container_name = 'foo_container'
+ expected_obj_name = 'configdrive-%s' % self.node.uuid
+ expected_obj_header = {'X-Delete-After': '1800'}
+ expected_instance_info = {'configdrive': 'http://1.2.3.4'}
+
+ # set configs and mocks
+ CONF.set_override('configdrive_use_object_store', True,
+ group='deploy')
+ CONF.set_override('configdrive_swift_container', container_name,
+ group='conductor')
+ CONF.set_override('deploy_callback_timeout', 0,
+ group='conductor')
+ mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
+
+ deployments._store_configdrive(self.node, b'foo')
+
+ mock_swift.assert_called_once_with()
+ mock_swift.return_value.create_object.assert_called_once_with(
+ container_name, expected_obj_name, mock.ANY,
+ object_headers=expected_obj_header)
+ mock_swift.return_value.get_temp_url.assert_called_once_with(
+ container_name, expected_obj_name, 1800)
+ self.node.refresh()
+ self.assertEqual(expected_instance_info, self.node.instance_info)
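The StoreConfigDriveTestCase assertions above describe the Swift path: the config drive is uploaded as an object with an X-Delete-After header and only a temp URL is kept in instance_info. A rough sketch of that flow, assuming the SwiftAPI methods mocked in these tests; the container and timeout values are illustrative.

from ironic.common import swift

def store_configdrive_in_swift(node, configdrive, container, timeout):
    # Upload the config drive and keep only a temporary URL on the node,
    # mirroring the create_object/get_temp_url calls asserted above.  The
    # real code uploads from a temporary file (hence mock.ANY in the tests);
    # passing the payload directly here is a simplification.
    object_name = 'configdrive-%s' % node.uuid
    swift_api = swift.SwiftAPI()
    swift_api.create_object(container, object_name, configdrive,
                            object_headers={'X-Delete-After': str(timeout)})
    temp_url = swift_api.get_temp_url(container, object_name, timeout)
    i_info = node.instance_info
    i_info['configdrive'] = temp_url
    node.instance_info = i_info
    node.save()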
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 50b7fcb62..4f334d68a 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -27,7 +27,6 @@ import eventlet
from futurist import waiters
import mock
from oslo_config import cfg
-from oslo_db import exception as db_exception
import oslo_messaging as messaging
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovo_base
@@ -39,7 +38,8 @@ from ironic.common import exception
from ironic.common import images
from ironic.common import nova
from ironic.common import states
-from ironic.common import swift
+from ironic.conductor import cleaning
+from ironic.conductor import deployments
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import steps as conductor_steps
@@ -553,6 +553,65 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertFalse(res['protected'])
self.assertIsNone(res['protected_reason'])
+ def test_update_node_retired_set(self):
+ for state in ('active', 'rescue', 'manageable'):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state=state)
+
+ node.retired = True
+ res = self.service.update_node(self.context, node)
+ self.assertTrue(res['retired'])
+ self.assertIsNone(res['retired_reason'])
+
+ def test_update_node_retired_invalid_state(self):
+ # NOTE(arne_wiebalck): nodes in 'available' cannot be retired.
+ # This is to ensure backwards compatibility.
+ node = obj_utils.create_test_node(self.context,
+ provision_state='available')
+
+ node.retired = True
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.update_node,
+ self.context,
+ node)
+ # Compare true exception hidden by @messaging.expected_exceptions
+ self.assertEqual(exception.InvalidState, exc.exc_info[0])
+
+ res = objects.Node.get_by_uuid(self.context, node['uuid'])
+ self.assertFalse(res['retired'])
+ self.assertIsNone(res['retired_reason'])
+
+ def test_update_node_retired_unset(self):
+ for state in ('active', 'manageable', 'rescue', 'rescue failed'):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state=state,
+ retired=True,
+ retired_reason='EOL')
+
+ # check that ManagerService.update_node actually updates the node
+ node.retired = False
+ res = self.service.update_node(self.context, node)
+ self.assertFalse(res['retired'])
+ self.assertIsNone(res['retired_reason'])
+
+ def test_update_node_retired_reason_without_retired(self):
+ node = obj_utils.create_test_node(self.context,
+ provision_state='active')
+
+ node.retired_reason = 'warranty expired'
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.update_node,
+ self.context,
+ node)
+ # Compare true exception hidden by @messaging.expected_exceptions
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
+
+ res = objects.Node.get_by_uuid(self.context, node['uuid'])
+ self.assertFalse(res['retired'])
+ self.assertIsNone(res['retired_reason'])
+
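The retirement tests above encode the conductor-side rule for the new retired/retired_reason fields: retired_reason requires retired, and a node in 'available' cannot be retired. A minimal sketch of that validation as exercised via the conductor's update_node; the helper name and delta argument below are hypothetical.

from ironic.common import exception

def check_retired_update(node, delta):
    # Hypothetical helper mirroring the assertions above; 'delta' is the set
    # of fields being changed and 'node' already carries the new values.
    if 'retired_reason' in delta and not node.retired:
        raise exception.InvalidParameterValue(
            "retired_reason cannot be set on a node that is not retired")
    if ('retired' in delta and node.retired
            and node.provision_state == 'available'):
        raise exception.InvalidState(
            "Nodes in the 'available' state cannot be retired")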
def test_update_node_already_locked(self):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
extra={'test': 'one'})
@@ -1524,7 +1583,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
def test_do_node_deploy_rebuild_active_state_error(self, mock_iwdi):
- # Tests manager.do_node_deploy() & manager._do_next_deploy_step(),
+ # Tests manager.do_node_deploy() & deployments.do_next_deploy_step(),
# when getting an unexpected state returned from a deploy_step.
mock_iwdi.return_value = True
self._start_service()
@@ -1532,7 +1591,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step, causing
# manager._old_rest_of_do_node_deploy() to be run instead of
- # manager._do_next_deploy_step(). So we defer mock'ing until after
+ # deployments.do_next_deploy_step(). So we defer mock'ing until after
# the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
@@ -1569,7 +1628,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step, causing
# manager._old_rest_of_do_node_deploy() to be run instead of
- # manager._do_next_deploy_step(). So we defer mock'ing until after
+ # deployments.do_next_deploy_step(). So we defer mock'ing until after
# the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
@@ -1601,7 +1660,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step, causing
# manager._old_rest_of_do_node_deploy() to be run instead of
- # manager._do_next_deploy_step(). So we defer mock'ing until after
+ # deployments.do_next_deploy_step(). So we defer mock'ing until after
# the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
@@ -1632,7 +1691,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step, causing
# manager._old_rest_of_do_node_deploy() to be run instead of
- # manager._do_next_deploy_step(). So we defer mock'ing until after
+ # deployments.do_next_deploy_step(). So we defer mock'ing until after
# the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
@@ -1663,7 +1722,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
# decorator. With a decorator, when initialization is done, the
# mocked deploy() method isn't considered a deploy step, causing
# manager._old_rest_of_do_node_deploy() to be run instead of
- # manager._do_next_deploy_step(). So we defer mock'ing until after
+ # deployments.do_next_deploy_step(). So we defer mock'ing until after
# the init is done.
with mock.patch.object(fake.FakeDeploy,
'deploy', autospec=True) as mock_deploy:
@@ -1863,7 +1922,8 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
- mock_spawn.assert_called_with(mock.ANY, manager._do_next_deploy_step,
+ mock_spawn.assert_called_with(mock.ANY,
+ deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
@mock.patch.object(task_manager.TaskManager, 'process_event',
@@ -1891,7 +1951,8 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
node.refresh()
self.assertEqual(states.DEPLOYING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
- mock_spawn.assert_called_with(mock.ANY, manager._do_next_deploy_step,
+ mock_spawn.assert_called_with(mock.ANY,
+ deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
self.assertFalse(mock_event.called)
@@ -1918,7 +1979,8 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
self.assertNotIn(
'skip_current_deploy_step', node.driver_internal_info)
expected_step_index = 0
- mock_spawn.assert_called_with(mock.ANY, manager._do_next_deploy_step,
+ mock_spawn.assert_called_with(mock.ANY,
+ deployments.do_next_deploy_step,
mock.ANY, expected_step_index, mock.ANY)
def test_continue_node_deploy_skip_step(self):
@@ -1944,938 +2006,10 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
self._stop_service()
node.refresh()
self.assertNotIn('deployment_polling', node.driver_internal_info)
- mock_spawn.assert_called_with(mock.ANY, manager._do_next_deploy_step,
+ mock_spawn.assert_called_with(mock.ANY,
+ deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test_do_next_deploy_step_oob_reboot(self, mock_execute):
- # When a deploy step fails, go to DEPLOYWAIT
- tgt_prov_state = states.ACTIVE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'deploy_steps': self.deploy_steps,
- 'deploy_step_index': None,
- 'deployment_reboot': True},
- clean_step={})
- mock_execute.side_effect = exception.AgentConnectionFailed(
- reason='failed')
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- manager._do_next_deploy_step(task, 0, mock.ANY)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANWAIT
- self.assertEqual(states.DEPLOYWAIT, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual(self.deploy_steps[0], node.deploy_step)
- self.assertEqual(0, node.driver_internal_info['deploy_step_index'])
- self.assertFalse(node.driver_internal_info['skip_current_deploy_step'])
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.deploy_steps[0])
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test_do_next_clean_step_oob_reboot_fail(self,
- mock_execute):
- # When a deploy step fails with no reboot requested go to DEPLOYFAIL
- tgt_prov_state = states.ACTIVE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'deploy_steps': self.deploy_steps,
- 'deploy_step_index': None},
- deploy_step={})
- mock_execute.side_effect = exception.AgentConnectionFailed(
- reason='failed')
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- manager._do_next_deploy_step(task, 0, mock.ANY)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to DEPLOYFAIL, clear deploy_steps
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertNotIn('skip_current_deploy_step', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.deploy_steps[0])
-
-
-@mgr_utils.mock_record_keepalive
-class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
- def test__do_node_deploy_driver_raises_prepare_error(self, mock_prepare,
- mock_deploy):
- self._start_service()
- # test when driver.deploy.prepare raises an ironic error
- mock_prepare.side_effect = exception.InstanceDeployFailure('test')
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaises(exception.InstanceDeployFailure,
- manager.do_node_deploy, task,
- self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- # NOTE(deva): failing a deploy does not clear the target state
- # any longer. Instead, it is cleared when the instance
- # is deleted.
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- self.assertTrue(mock_prepare.called)
- self.assertFalse(mock_deploy.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
- def test__do_node_deploy_unexpected_prepare_error(self, mock_prepare,
- mock_deploy):
- self._start_service()
- # test when driver.deploy.prepare raises an exception
- mock_prepare.side_effect = RuntimeError('test')
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaises(RuntimeError,
- manager.do_node_deploy, task,
- self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- # NOTE(deva): failing a deploy does not clear the target state
- # any longer. Instead, it is cleared when the instance
- # is deleted.
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- self.assertTrue(mock_prepare.called)
- self.assertFalse(mock_deploy.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- def test__do_node_deploy_driver_raises_error_old(self, mock_deploy):
- # TODO(rloo): delete this after the deprecation period for supporting
- # non deploy_steps.
- # Mocking FakeDeploy.deploy before starting the service, causes
- # it not to be a deploy_step.
- self._start_service()
- # test when driver.deploy.deploy raises an ironic error
- mock_deploy.side_effect = exception.InstanceDeployFailure('test')
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaises(exception.InstanceDeployFailure,
- manager.do_node_deploy, task,
- self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- # NOTE(deva): failing a deploy does not clear the target state
- # any longer. Instead, it is cleared when the instance
- # is deleted.
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- def test__do_node_deploy_driver_unexpected_exception_old(self,
- mock_deploy):
- # TODO(rloo): delete this after the deprecation period for supporting
- # non deploy_steps.
- # Mocking FakeDeploy.deploy before starting the service, causes
- # it not to be a deploy_step.
- self._start_service()
- # test when driver.deploy.deploy raises an exception
- mock_deploy.side_effect = RuntimeError('test')
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaises(RuntimeError,
- manager.do_node_deploy, task,
- self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- # NOTE(deva): failing a deploy does not clear the target state
- # any longer. Instead, it is cleared when the instance
- # is deleted.
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY)
-
- def _test__do_node_deploy_driver_exception(self, exc, unexpected=False):
- self._start_service()
- with mock.patch.object(fake.FakeDeploy,
- 'deploy', autospec=True) as mock_deploy:
- # test when driver.deploy.deploy() raises an exception
- mock_deploy.side_effect = exc
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- manager.do_node_deploy(task, self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- # NOTE(deva): failing a deploy does not clear the target state
- # any longer. Instead, it is cleared when the instance
- # is deleted.
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- if unexpected:
- self.assertIn('Exception', node.last_error)
- else:
- self.assertNotIn('Exception', node.last_error)
-
- mock_deploy.assert_called_once_with(mock.ANY, task)
-
- def test__do_node_deploy_driver_ironic_exception(self):
- self._test__do_node_deploy_driver_exception(
- exception.InstanceDeployFailure('test'))
-
- def test__do_node_deploy_driver_unexpected_exception(self):
- self._test__do_node_deploy_driver_exception(RuntimeError('test'),
- unexpected=True)
-
- @mock.patch.object(manager, '_store_configdrive')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- def test__do_node_deploy_ok_old(self, mock_deploy, mock_store):
- # TODO(rloo): delete this after the deprecation period for supporting
- # non deploy_steps.
- # Mocking FakeDeploy.deploy before starting the service, causes
- # it not to be a deploy_step.
- self._start_service()
- # test when driver.deploy.deploy returns DEPLOYDONE
- mock_deploy.return_value = states.DEPLOYDONE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- manager.do_node_deploy(task, self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY)
- # assert _store_configdrive wasn't invoked
- self.assertFalse(mock_store.called)
-
- @mock.patch.object(manager, '_store_configdrive')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- def test__do_node_deploy_ok_configdrive_old(self, mock_deploy, mock_store):
- # TODO(rloo): delete this after the deprecation period for supporting
- # non deploy_steps.
- # Mocking FakeDeploy.deploy before starting the service, causes
- # it not to be a deploy_step.
- self._start_service()
- # test when driver.deploy.deploy returns DEPLOYDONE
- mock_deploy.return_value = states.DEPLOYDONE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
- configdrive = 'foo'
-
- manager.do_node_deploy(task, self.service.conductor.id,
- configdrive=configdrive)
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY)
- mock_store.assert_called_once_with(task.node, configdrive)
-
- @mock.patch.object(manager, '_store_configdrive')
- def _test__do_node_deploy_ok(self, mock_store, configdrive=None,
- expected_configdrive=None):
- expected_configdrive = expected_configdrive or configdrive
- self._start_service()
- with mock.patch.object(fake.FakeDeploy,
- 'deploy', autospec=True) as mock_deploy:
- mock_deploy.return_value = None
- self.node = obj_utils.create_test_node(
- self.context, driver='fake-hardware', name=None,
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, self.node.uuid)
-
- manager.do_node_deploy(task, self.service.conductor.id,
- configdrive=configdrive)
- self.node.refresh()
- self.assertEqual(states.ACTIVE, self.node.provision_state)
- self.assertEqual(states.NOSTATE, self.node.target_provision_state)
- self.assertIsNone(self.node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
- if configdrive:
- mock_store.assert_called_once_with(task.node,
- expected_configdrive)
- else:
- self.assertFalse(mock_store.called)
-
- def test__do_node_deploy_ok(self):
- self._test__do_node_deploy_ok()
-
- def test__do_node_deploy_ok_configdrive(self):
- configdrive = 'foo'
- self._test__do_node_deploy_ok(configdrive=configdrive)
-
- @mock.patch('openstack.baremetal.configdrive.build')
- def test__do_node_deploy_configdrive_as_dict(self, mock_cd):
- mock_cd.return_value = 'foo'
- configdrive = {'user_data': 'abcd'}
- self._test__do_node_deploy_ok(configdrive=configdrive,
- expected_configdrive='foo')
- mock_cd.assert_called_once_with({'uuid': self.node.uuid},
- network_data=None,
- user_data=b'abcd',
- vendor_data=None)
-
- @mock.patch('openstack.baremetal.configdrive.build')
- def test__do_node_deploy_configdrive_as_dict_with_meta_data(self, mock_cd):
- mock_cd.return_value = 'foo'
- configdrive = {'meta_data': {'uuid': uuidutils.generate_uuid(),
- 'name': 'new-name',
- 'hostname': 'example.com'}}
- self._test__do_node_deploy_ok(configdrive=configdrive,
- expected_configdrive='foo')
- mock_cd.assert_called_once_with(configdrive['meta_data'],
- network_data=None,
- user_data=None,
- vendor_data=None)
-
- @mock.patch('openstack.baremetal.configdrive.build')
- def test__do_node_deploy_configdrive_with_network_data(self, mock_cd):
- mock_cd.return_value = 'foo'
- configdrive = {'network_data': {'links': []}}
- self._test__do_node_deploy_ok(configdrive=configdrive,
- expected_configdrive='foo')
- mock_cd.assert_called_once_with({'uuid': self.node.uuid},
- network_data={'links': []},
- user_data=None,
- vendor_data=None)
-
- @mock.patch('openstack.baremetal.configdrive.build')
- def test__do_node_deploy_configdrive_and_user_data_as_dict(self, mock_cd):
- mock_cd.return_value = 'foo'
- configdrive = {'user_data': {'user': 'data'}}
- self._test__do_node_deploy_ok(configdrive=configdrive,
- expected_configdrive='foo')
- mock_cd.assert_called_once_with({'uuid': self.node.uuid},
- network_data=None,
- user_data=b'{"user": "data"}',
- vendor_data=None)
-
- @mock.patch('openstack.baremetal.configdrive.build')
- def test__do_node_deploy_configdrive_with_vendor_data(self, mock_cd):
- mock_cd.return_value = 'foo'
- configdrive = {'vendor_data': {'foo': 'bar'}}
- self._test__do_node_deploy_ok(configdrive=configdrive,
- expected_configdrive='foo')
- mock_cd.assert_called_once_with({'uuid': self.node.uuid},
- network_data=None,
- user_data=None,
- vendor_data={'foo': 'bar'})
-
- @mock.patch.object(swift, 'SwiftAPI')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
- def test__do_node_deploy_configdrive_swift_error(self, mock_prepare,
- mock_swift):
- CONF.set_override('configdrive_use_object_store', True,
- group='deploy')
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- mock_swift.side_effect = exception.SwiftOperationError('error')
- self.assertRaises(exception.SwiftOperationError,
- manager.do_node_deploy, task,
- self.service.conductor.id,
- configdrive=b'fake config drive')
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- self.assertFalse(mock_prepare.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
- def test__do_node_deploy_configdrive_db_error(self, mock_prepare):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
- task.node.save()
- expected_instance_info = dict(node.instance_info)
- with mock.patch.object(dbapi.IMPL, 'update_node') as mock_db:
- db_node = self.dbapi.get_node_by_uuid(node.uuid)
- mock_db.side_effect = [db_exception.DBDataError('DB error'),
- db_node, db_node, db_node]
- self.assertRaises(db_exception.DBDataError,
- manager.do_node_deploy, task,
- self.service.conductor.id,
- configdrive=b'fake config drive')
- expected_instance_info.update(configdrive=b'fake config drive')
- expected_calls = [
- mock.call(node.uuid,
- {'version': mock.ANY,
- 'instance_info': expected_instance_info}),
- mock.call(node.uuid,
- {'version': mock.ANY,
- 'last_error': mock.ANY}),
- mock.call(node.uuid,
- {'version': mock.ANY,
- 'deploy_step': {},
- 'driver_internal_info': mock.ANY}),
- mock.call(node.uuid,
- {'version': mock.ANY,
- 'provision_state': states.DEPLOYFAIL,
- 'target_provision_state': states.ACTIVE}),
- ]
- self.assertEqual(expected_calls, mock_db.mock_calls)
- self.assertFalse(mock_prepare.called)
-
- @mock.patch.object(manager, '_store_configdrive')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
- def test__do_node_deploy_configdrive_unexpected_error(self, mock_prepare,
- mock_store):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- mock_store.side_effect = RuntimeError('unexpected')
- self.assertRaises(RuntimeError,
- manager.do_node_deploy, task,
- self.service.conductor.id,
- configdrive=b'fake config drive')
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- self.assertFalse(mock_prepare.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- def test__do_node_deploy_ok_2_old(self, mock_deploy):
- # TODO(rloo): delete this after the deprecation period for supporting
- # non deploy_steps.
- # Mocking FakeDeploy.deploy before starting the service, causes
- # it not to be a deploy_step.
- # NOTE(rloo): a different way of testing for the same thing as in
- # test__do_node_deploy_ok()
- self._start_service()
- # test when driver.deploy.deploy returns DEPLOYDONE
- mock_deploy.return_value = states.DEPLOYDONE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager.do_node_deploy(task, self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY)
-
- def test__do_node_deploy_ok_2(self):
- # NOTE(rloo): a different way of testing for the same thing as in
- # test__do_node_deploy_ok(). Instead of specifying the provision &
- # target_provision_states when creating the node, we call
- # task.process_event() to "set the stage" (err "states").
- self._start_service()
- with mock.patch.object(fake.FakeDeploy,
- 'deploy', autospec=True) as mock_deploy:
- # test when driver.deploy.deploy() returns None
- mock_deploy.return_value = None
- node = obj_utils.create_test_node(self.context,
- driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager.do_node_deploy(task, self.service.conductor.id)
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_deploy.assert_called_once_with(mock.ANY, mock.ANY)
-
- @mock.patch.object(manager, '_do_next_deploy_step', autospec=True)
- @mock.patch.object(manager, '_old_rest_of_do_node_deploy',
- autospec=True)
- @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
- autospec=True)
- def test_do_node_deploy_deprecated(self, mock_set_steps, mock_old_way,
- mock_deploy_step):
- # TODO(rloo): no deploy steps; delete this when we remove support
- # for handling no deploy steps.
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager.do_node_deploy(task, self.service.conductor.id)
- mock_set_steps.assert_called_once_with(task)
- mock_old_way.assert_called_once_with(task, self.service.conductor.id,
- True)
- self.assertFalse(mock_deploy_step.called)
- self.assertNotIn('deploy_steps', task.node.driver_internal_info)
-
- @mock.patch.object(manager, '_do_next_deploy_step', autospec=True)
- @mock.patch.object(manager, '_old_rest_of_do_node_deploy',
- autospec=True)
- @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
- autospec=True)
- def test_do_node_deploy_steps(self, mock_set_steps, mock_old_way,
- mock_deploy_step):
- # these are not real steps...
- fake_deploy_steps = ['step1', 'step2']
-
- def add_steps(task):
- info = task.node.driver_internal_info
- info['deploy_steps'] = fake_deploy_steps
- task.node.driver_internal_info = info
- task.node.save()
-
- mock_set_steps.side_effect = add_steps
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager.do_node_deploy(task, self.service.conductor.id)
- mock_set_steps.assert_called_once_with(task)
- self.assertFalse(mock_old_way.called)
- mock_set_steps.assert_called_once_with(task)
- self.assertEqual(fake_deploy_steps,
- task.node.driver_internal_info['deploy_steps'])
-
- @mock.patch.object(manager, '_do_next_deploy_step', autospec=True)
- @mock.patch.object(manager, '_old_rest_of_do_node_deploy',
- autospec=True)
- @mock.patch.object(conductor_steps, 'set_node_deployment_steps',
- autospec=True)
- def test_do_node_deploy_steps_old_rpc(self, mock_set_steps, mock_old_way,
- mock_deploy_step):
- # TODO(rloo): old RPC; delete this when we remove support for drivers
- # with no deploy steps.
- CONF.set_override('pin_release_version', '11.0')
- # these are not real steps...
- fake_deploy_steps = ['step1', 'step2']
-
- def add_steps(task):
- info = task.node.driver_internal_info
- info['deploy_steps'] = fake_deploy_steps
- task.node.driver_internal_info = info
- task.node.save()
-
- mock_set_steps.side_effect = add_steps
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager.do_node_deploy(task, self.service.conductor.id)
- mock_set_steps.assert_called_once_with(task)
- mock_old_way.assert_called_once_with(task, self.service.conductor.id,
- False)
- self.assertFalse(mock_deploy_step.called)
- self.assertNotIn('deploy_steps', task.node.driver_internal_info)
-
- @mock.patch.object(manager, '_SEEN_NO_DEPLOY_STEP_DEPRECATIONS',
- autospec=True)
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
- def test__old_rest_of_do_node_deploy_no_steps(self, mock_deploy, mock_log,
- mock_deprecate):
- # TODO(rloo): no deploy steps; delete this when we remove support
- # for handling no deploy steps.
- mock_deprecate.__contains__.side_effect = [False, True]
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._old_rest_of_do_node_deploy(task, self.service.conductor.id,
- True)
- mock_deploy.assert_called_once_with(mock.ANY, task)
- self.assertTrue(mock_log.warning.called)
- self.assertEqual(self.service.conductor.id,
- task.node.conductor_affinity)
- mock_deprecate.__contains__.assert_called_once_with('FakeDeploy')
- mock_deprecate.add.assert_called_once_with('FakeDeploy')
-
- # Make sure the deprecation warning isn't logged again
- mock_log.reset_mock()
- mock_deprecate.add.reset_mock()
- manager._old_rest_of_do_node_deploy(task, self.service.conductor.id,
- True)
- self.assertFalse(mock_log.warning.called)
- mock_deprecate.__contains__.assert_has_calls(
- [mock.call('FakeDeploy'), mock.call('FakeDeploy')])
- self.assertFalse(mock_deprecate.add.called)
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
- def test__old_rest_of_do_node_deploy_has_steps(self, mock_deploy,
- mock_log):
- # TODO(rloo): has steps but old RPC; delete this when we remove support
- # for handling no deploy steps.
- manager._SEEN_NO_DEPLOY_STEP_DEPRECATIONS = set()
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._old_rest_of_do_node_deploy(task, self.service.conductor.id,
- False)
- mock_deploy.assert_called_once_with(mock.ANY, task)
- self.assertFalse(mock_log.warning.called)
- self.assertEqual(self.service.conductor.id,
- task.node.conductor_affinity)
-
- @mock.patch('ironic.conductor.manager._start_console_in_deploy',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
- def test__old_rest_of_do_node_deploy_console(self, mock_deploy,
- mock_console):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
- mock_deploy.return_value = states.DEPLOYDONE
-
- manager._old_rest_of_do_node_deploy(task, self.service.conductor.id,
- True)
- mock_deploy.assert_called_once_with(mock.ANY, task)
- mock_console.assert_called_once_with(task)
- self.assertEqual(self.service.conductor.id,
- task.node.conductor_affinity)
-
-
-@mgr_utils.mock_record_keepalive
-class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
- db_base.DbTestCase):
- def setUp(self):
- super(DoNextDeployStepTestCase, self).setUp()
- self.deploy_start = {
- 'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'}
- self.deploy_end = {
- 'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'}
- self.deploy_steps = [self.deploy_start, self.deploy_end]
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- def test__do_next_deploy_step_none(self, mock_log):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, None, self.service.conductor.id)
-
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(2, mock_log.info.call_count)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test__do_next_deploy_step_async(self, mock_execute):
- driver_internal_info = {'deploy_step_index': None,
- 'deploy_steps': self.deploy_steps}
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- driver_internal_info=driver_internal_info,
- deploy_step={})
- mock_execute.return_value = states.DEPLOYWAIT
- expected_first_step = node.driver_internal_info['deploy_steps'][0]
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, 0, self.service.conductor.id)
-
- node.refresh()
- self.assertEqual(states.DEPLOYWAIT, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertEqual(expected_first_step, node.deploy_step)
- self.assertEqual(0, node.driver_internal_info['deploy_step_index'])
- self.assertEqual(self.service.conductor.id, node.conductor_affinity)
- mock_execute.assert_called_once_with(mock.ANY, task,
- self.deploy_steps[0])
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test__do_next_deploy_step_continue_from_last_step(self, mock_execute):
- # Resume an in-progress deploy after the first async step
- driver_internal_info = {'deploy_step_index': 0,
- 'deploy_steps': self.deploy_steps}
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYWAIT,
- target_provision_state=states.ACTIVE,
- driver_internal_info=driver_internal_info,
- deploy_step=self.deploy_steps[0])
- mock_execute.return_value = states.DEPLOYWAIT
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('resume')
-
- manager._do_next_deploy_step(task, 1, self.service.conductor.id)
- node.refresh()
-
- self.assertEqual(states.DEPLOYWAIT, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertEqual(self.deploy_steps[1], node.deploy_step)
- self.assertEqual(1, node.driver_internal_info['deploy_step_index'])
- mock_execute.assert_called_once_with(mock.ANY, task,
- self.deploy_steps[1])
-
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def _test__do_next_deploy_step_last_step_done(self, mock_execute,
- mock_console,
- console_enabled=False,
- console_error=False):
- # Resume where last_step is the last deploy step that was executed
- driver_internal_info = {'deploy_step_index': 1,
- 'deploy_steps': self.deploy_steps}
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYWAIT,
- target_provision_state=states.ACTIVE,
- driver_internal_info=driver_internal_info,
- deploy_step=self.deploy_steps[1],
- console_enabled=console_enabled)
- mock_execute.return_value = None
- if console_error:
- mock_console.side_effect = exception.ConsoleError()
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('resume')
-
- manager._do_next_deploy_step(task, None, self.service.conductor.id)
- node.refresh()
- # Deploying should be complete without calling additional steps
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertIsNone(node.driver_internal_info['deploy_steps'])
- self.assertFalse(mock_execute.called)
- if console_enabled:
- mock_console.assert_called_once_with(mock.ANY, task)
- else:
- self.assertFalse(mock_console.called)
-
- def test__do_next_deploy_step_last_step_done(self):
- self._test__do_next_deploy_step_last_step_done()
-
- def test__do_next_deploy_step_last_step_done_with_console(self):
- self._test__do_next_deploy_step_last_step_done(console_enabled=True)
-
- def test__do_next_deploy_step_last_step_done_with_console_error(self):
- self._test__do_next_deploy_step_last_step_done(console_enabled=True,
- console_error=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test__do_next_deploy_step_all(self, mock_execute):
- # Run all steps from start to finish (all synchronous)
- driver_internal_info = {'deploy_step_index': None,
- 'deploy_steps': self.deploy_steps,
- 'agent_url': 'url'}
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- driver_internal_info=driver_internal_info,
- deploy_step={})
- mock_execute.return_value = None
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, 1, self.service.conductor.id)
-
- # Deploying should be complete
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertIsNone(node.driver_internal_info['deploy_steps'])
-        mock_execute.assert_has_calls(
-            [mock.call(mock.ANY, mock.ANY, self.deploy_steps[0]),
-             mock.call(mock.ANY, mock.ANY, self.deploy_steps[1])])
- self.assertNotIn('agent_url', node.driver_internal_info)
-
- @mock.patch.object(conductor_utils, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def _do_next_deploy_step_execute_fail(self, exc, traceback,
- mock_execute, mock_log):
- # When a deploy step fails, go to DEPLOYFAIL
- driver_internal_info = {'deploy_step_index': None,
- 'deploy_steps': self.deploy_steps}
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- driver_internal_info=driver_internal_info,
- deploy_step={})
- mock_execute.side_effect = exc
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, 0, self.service.conductor.id)
-
- # Make sure we go to DEPLOYFAIL, clear deploy_steps
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertFalse(node.maintenance)
- mock_execute.assert_called_once_with(mock.ANY, mock.ANY,
- self.deploy_steps[0])
- mock_log.error.assert_called_once_with(mock.ANY, exc_info=traceback)
-
- def test_do_next_deploy_step_execute_ironic_exception(self):
- self._do_next_deploy_step_execute_fail(
- exception.IronicException('foo'), False)
-
- def test_do_next_deploy_step_execute_exception(self):
- self._do_next_deploy_step_execute_fail(Exception('foo'), True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test_do_next_deploy_step_no_steps(self, mock_execute):
-
- self._start_service()
- for info in ({'deploy_steps': None, 'deploy_step_index': None},
- {'deploy_steps': None}):
- # Resume where there are no steps, should be a noop
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- uuid=uuidutils.generate_uuid(),
- last_error=None,
- driver_internal_info=info,
- deploy_step={})
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, None, self.service.conductor.id)
-
- # Deploying should be complete without calling additional steps
- node.refresh()
- self.assertEqual(states.ACTIVE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertFalse(mock_execute.called)
- mock_execute.reset_mock()
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
- autospec=True)
- def test_do_next_deploy_step_bad_step_return_value(self, mock_execute):
- # When a deploy step fails, go to DEPLOYFAIL
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- driver_internal_info={'deploy_steps': self.deploy_steps,
- 'deploy_step_index': None},
- deploy_step={})
- mock_execute.return_value = "foo"
-
- task = task_manager.TaskManager(self.context, node.uuid)
- task.process_event('deploy')
-
- manager._do_next_deploy_step(task, 0, self.service.conductor.id)
-
- # Make sure we go to DEPLOYFAIL, clear deploy_steps
- node.refresh()
- self.assertEqual(states.DEPLOYFAIL, node.provision_state)
- self.assertEqual(states.ACTIVE, node.target_provision_state)
- self.assertEqual({}, node.deploy_step)
- self.assertNotIn('deploy_step_index', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertFalse(node.maintenance)
- mock_execute.assert_called_once_with(mock.ANY, mock.ANY,
- self.deploy_steps[0])
-
- def _test__get_node_next_deploy_steps(self, skip=True):
- driver_internal_info = {'deploy_steps': self.deploy_steps,
- 'deploy_step_index': 0}
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYWAIT,
- target_provision_state=states.ACTIVE,
- driver_internal_info=driver_internal_info,
- last_error=None,
- deploy_step=self.deploy_steps[0])
-
- with task_manager.acquire(self.context, node.uuid) as task:
- step_index = self.service._get_node_next_deploy_steps(
- task, skip_current_step=skip)
- expected_index = 1 if skip else 0
- self.assertEqual(expected_index, step_index)
-
- def test__get_node_next_deploy_steps(self):
- self._test__get_node_next_deploy_steps()
-
- def test__get_node_next_deploy_steps_no_skip(self):
- self._test__get_node_next_deploy_steps(skip=False)
-
- def test__get_node_next_deploy_steps_unset_deploy_step(self):
- driver_internal_info = {'deploy_steps': self.deploy_steps,
- 'deploy_step_index': None}
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.DEPLOYWAIT,
- target_provision_state=states.ACTIVE,
- driver_internal_info=driver_internal_info,
- last_error=None,
- deploy_step=None)
-
- with task_manager.acquire(self.context, node.uuid) as task:
- step_index = self.service._get_node_next_deploy_steps(task)
- self.assertEqual(0, step_index)
-
- def test__get_node_next_steps_exception(self):
- node = obj_utils.create_test_node(self.context)
-
- with task_manager.acquire(self.context, node.uuid) as task:
- self.assertRaises(exception.Invalid,
- self.service._get_node_next_steps, task, 'foo')
-
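The tests deleted above pivot on a small amount of node bookkeeping: driver_internal_info['deploy_steps'] holds the ordered step list, deploy_step_index records the step in progress, an asynchronous step returns DEPLOYWAIT, and finishing the list clears both fields and activates the node. A minimal, self-contained sketch of that dispatch pattern follows; the names (run_deploy_steps, the dict-based node) are illustrative stand-ins, not Ironic's real conductor code.

    # Simplified stand-ins for ironic.common.states values.
    DEPLOYWAIT = 'deploy wait'
    DEPLOYFAIL = 'deploy failed'
    ACTIVE = 'active'

    def run_deploy_steps(node, start_index, execute_step):
        """Run steps from start_index until one waits, fails or all finish.

        ``node`` is a plain dict here; ``execute_step`` stands in for the
        deploy interface's execute_deploy_step().
        """
        info = node['driver_internal_info']
        steps = info.get('deploy_steps') or []
        first = 0 if start_index is None else start_index
        for index in range(first, len(steps)):
            step = steps[index]
            node['deploy_step'] = step
            info['deploy_step_index'] = index
            result = execute_step(step)
            if result == DEPLOYWAIT:
                # Asynchronous step: keep the bookkeeping in place and
                # wait for the step to signal completion later.
                node['provision_state'] = DEPLOYWAIT
                return
            if result is not None:
                # Any other non-None return value is treated as a failure.
                node['provision_state'] = DEPLOYFAIL
                return
        # All steps ran synchronously: clear the bookkeeping and go active.
        node['deploy_step'] = {}
        info.pop('deploy_step_index', None)
        info['deploy_steps'] = None
        node['provision_state'] = ACTIVE

Under these rules the removed tests' expectations fall out directly: a DEPLOYWAIT return leaves deploy_step_index pointing at the running step, while a clean run ends with deploy_steps set to None and no deploy_step_index key at all.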
@mgr_utils.mock_record_keepalive
class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -3050,7 +2184,7 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# by default, the fake drivers still invoke neutron.
@mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console')
@mock.patch('ironic.common.neutron.unbind_neutron_port')
- @mock.patch('ironic.conductor.manager.ConductorManager._do_node_clean')
+ @mock.patch('ironic.conductor.cleaning.do_node_clean')
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down')
def _test__do_node_tear_down_ok(self, mock_tear_down, mock_clean,
mock_unbind, mock_console,
@@ -3118,7 +2252,7 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._test__do_node_tear_down_ok(with_allocation=True)
@mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up')
- @mock.patch('ironic.conductor.manager.ConductorManager._do_node_clean')
+ @mock.patch('ironic.conductor.cleaning.do_node_clean')
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down')
def _test_do_node_tear_down_from_state(self, init_state, is_rescue_state,
mock_tear_down, mock_clean,
@@ -3245,7 +2379,7 @@ class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
self.assertEqual(states.AVAILABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(self.service,
- self.service._do_node_clean, mock.ANY)
+ cleaning.do_node_clean, mock.ANY)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@@ -3308,7 +2442,7 @@ class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
self.assertEqual(tgt_prov_state, node.target_provision_state)
self.assertIsNone(node.last_error)
mock_spawn.assert_called_with(
- self.service, self.service._do_node_clean_abort, mock.ANY)
+ self.service, cleaning.do_node_clean_abort, mock.ANY)
def test_do_provision_action_abort_automated_clean(self):
self._do_provision_action_abort()
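The mock-target churn in the hunks above is the visible side of the refactor: cleaning moved from private ConductorManager methods to module-level functions in ironic.conductor.cleaning, so the tests now patch the function itself and assert that the worker is spawned with it. A hedged sketch of that pattern is below; the helper and test are illustrative only, not part of Ironic's test suite, and it assumes ironic is importable.

    from unittest import mock

    from ironic.conductor import cleaning

    def start_cleaning(spawn_worker, task):
        # Hypothetical stand-in for the manager code under test: it hands
        # the module-level function, not a bound method, to the worker pool.
        spawn_worker(cleaning.do_node_clean, task)

    @mock.patch.object(cleaning, 'do_node_clean', autospec=True)
    def test_clean_worker_is_spawned(mock_clean):
        spawn_worker = mock.Mock()
        start_cleaning(spawn_worker, mock.sentinel.task)
        spawn_worker.assert_called_once_with(mock_clean, mock.sentinel.task)

Patching the module attribute means the assertion can compare against the mock directly, without reaching into ConductorManager internals as the old `_do_node_clean` mocks did.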
@@ -3335,64 +2469,6 @@ class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
@mgr_utils.mock_record_keepalive
-class DoNodeCleanAbortTestCase(mgr_utils.ServiceSetUpMixin,
- db_base.DbTestCase):
- @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def _test__do_node_clean_abort(self, step_name, tear_mock):
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANFAIL,
- target_provision_state=states.AVAILABLE,
- clean_step={'step': 'foo', 'abortable': True},
- driver_internal_info={
- 'clean_step_index': 2,
- 'cleaning_reboot': True,
- 'cleaning_polling': True,
- 'skip_current_clean_step': True})
-
- with task_manager.acquire(self.context, node.uuid) as task:
- self.service._do_node_clean_abort(task, step_name=step_name)
- self.assertIsNotNone(task.node.last_error)
- tear_mock.assert_called_once_with(task.driver.deploy, task)
- if step_name:
- self.assertIn(step_name, task.node.last_error)
- # assert node's clean_step and metadata was cleaned up
- self.assertEqual({}, task.node.clean_step)
- self.assertNotIn('clean_step_index',
- task.node.driver_internal_info)
- self.assertNotIn('cleaning_reboot',
- task.node.driver_internal_info)
- self.assertNotIn('cleaning_polling',
- task.node.driver_internal_info)
- self.assertNotIn('skip_current_clean_step',
- task.node.driver_internal_info)
-
- def test__do_node_clean_abort(self):
- self._test__do_node_clean_abort(None)
-
- def test__do_node_clean_abort_with_step_name(self):
- self._test__do_node_clean_abort('foo')
-
- @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def test__do_node_clean_abort_tear_down_fail(self, tear_mock):
- tear_mock.side_effect = Exception('Surprise')
-
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANFAIL,
- target_provision_state=states.AVAILABLE,
- clean_step={'step': 'foo', 'abortable': True})
-
- with task_manager.acquire(self.context, node.uuid) as task:
- self.service._do_node_clean_abort(task)
- tear_mock.assert_called_once_with(task.driver.deploy, task)
- self.assertIsNotNone(task.node.last_error)
- self.assertIsNotNone(task.node.maintenance_reason)
- self.assertTrue(task.node.maintenance)
- self.assertEqual('clean failure', task.node.fault)
-
-
-@mgr_utils.mock_record_keepalive
class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(DoNodeCleanTestCase, self).setUp()
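The DoNodeCleanAbortTestCase removed above pins down the abort contract: tear_down_cleaning is always attempted, last_error is set (mentioning the step name when one was given), clean_step and the in-flight flags are cleared, and a tear-down failure parks the node in maintenance with fault 'clean failure'. A rough sketch of that bookkeeping, using a plain dict instead of a Node object and an illustrative clean_abort name:

    def clean_abort(node, tear_down_cleaning, step_name=None):
        # ``node`` is a plain dict standing in for the Node object.
        info = node.setdefault('driver_internal_info', {})
        try:
            tear_down_cleaning()
        except Exception as exc:
            # Tear-down failure leaves the node for an operator to inspect.
            node['maintenance'] = True
            node['maintenance_reason'] = str(exc)
            node['fault'] = 'clean failure'
        node['last_error'] = ('Clean step %s was aborted' % step_name
                              if step_name else 'Cleaning was aborted')
        node['clean_step'] = {}
        for key in ('clean_step_index', 'cleaning_reboot',
                    'cleaning_polling', 'skip_current_clean_step'):
            info.pop(key, None)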
@@ -3494,7 +2570,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
- self.service, self.service._do_node_clean, mock.ANY, clean_steps)
+ self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Node will be moved to CLEANING
self.assertEqual(states.CLEANING, node.provision_state)
@@ -3524,7 +2600,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
- self.service, self.service._do_node_clean, mock.ANY, clean_steps)
+ self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Node will be moved to CLEANING
self.assertEqual(states.CLEANING, node.provision_state)
@@ -3558,7 +2634,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_power_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_network_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_spawn.assert_called_with(
- self.service, self.service._do_node_clean, mock.ANY, clean_steps)
+ self.service, cleaning.do_node_clean, mock.ANY, clean_steps)
node.refresh()
# Make sure states were rolled back
self.assertEqual(prv_state, node.provision_state)
@@ -3631,7 +2707,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.CLEANING, node.provision_state)
self.assertEqual(tgt_prv_state, node.target_provision_state)
mock_spawn.assert_called_with(self.service,
- self.service._do_next_clean_step,
+ cleaning.do_next_clean_step,
mock.ANY, self.next_clean_step_index)
def test_continue_node_clean_automated(self):
@@ -3664,7 +2740,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
'skip_current_clean_step', node.driver_internal_info)
expected_step_index = 0
mock_spawn.assert_called_with(self.service,
- self.service._do_next_clean_step,
+ cleaning.do_next_clean_step,
mock.ANY, expected_step_index)
def test_continue_node_clean_skip_step(self):
@@ -3691,7 +2767,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertNotIn('cleaning_polling', node.driver_internal_info)
mock_spawn.assert_called_with(self.service,
- self.service._do_next_clean_step,
+ cleaning.do_next_clean_step,
mock.ANY, 1)
def _continue_node_clean_abort(self, manual=False):
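The continue_node_clean hunks above encode a single resumption rule: clean_step_index remembers the step that just ran, and cleaning continues from the following step unless skip_current_clean_step was set to False, in which case the interrupted step is re-run. A minimal sketch of that rule (get_next_clean_step_index is an illustrative name, not Ironic's helper):

    def get_next_clean_step_index(driver_internal_info,
                                  skip_current_step=True):
        steps = driver_internal_info.get('clean_steps') or []
        index = driver_internal_info.get('clean_step_index')
        if index is None:
            # Nothing has run yet: start at the first step, if any.
            return 0 if steps else None
        next_index = index + 1 if skip_current_step else index
        return next_index if next_index < len(steps) else None

    # Resuming after step 0 of three continues with step 1 ...
    info = {'clean_steps': ['erase', 'bios', 'raid'], 'clean_step_index': 0}
    assert get_next_clean_step_index(info) == 1
    # ... unless the interrupted step asked to be re-run.
    assert get_next_clean_step_index(info, skip_current_step=False) == 0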
@@ -3750,945 +2826,6 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_continue_node_clean_manual_abort_last_clean_step(self):
self._continue_node_clean_abort_last_clean_step(manual=True)
- def __do_node_clean_validate_fail(self, mock_validate, clean_steps=None):
- # InvalidParameterValue should cause node to go to CLEANFAIL
- mock_validate.side_effect = exception.InvalidParameterValue('error')
- tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
- node.refresh()
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- def test__do_node_clean_automated_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- def test__do_node_clean_manual_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
-
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_automated_network_validate_fail(self,
- mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
-
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_manual_network_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
- autospec=True)
- @mock.patch('ironic.conductor.manager.ConductorManager.'
- '_do_next_clean_step', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
- autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeBIOS.cache_bios_settings',
- autospec=True)
- def _test__do_node_clean_cache_bios(self, mock_bios, mock_validate,
- mock_prep, mock_next_step, mock_steps,
- mock_log, clean_steps=None,
- enable_unsupported=False,
- enable_exception=False):
- if enable_unsupported:
- mock_bios.side_effect = exception.UnsupportedDriverExtension('')
- elif enable_exception:
- mock_bios.side_effect = exception.IronicException('test')
- mock_prep.return_value = states.NOSTATE
- tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
- node.refresh()
- mock_bios.assert_called_once_with(mock.ANY, task)
- if clean_steps:
- self.assertEqual(states.CLEANING, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- else:
- self.assertEqual(states.CLEANING, node.provision_state)
- self.assertEqual(states.AVAILABLE, node.target_provision_state)
- mock_validate.assert_called_once_with(mock.ANY, task)
- if enable_exception:
- mock_log.exception.assert_called_once_with(
- 'Caching of bios settings failed on node {}. '
- 'Continuing with node cleaning.'
- .format(node.uuid))
-
- def test__do_node_clean_manual_cache_bios(self):
- self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid])
-
- def test__do_node_clean_automated_cache_bios(self):
- self._test__do_node_clean_cache_bios()
-
- def test__do_node_clean_manual_cache_bios_exception(self):
- self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid],
- enable_exception=True)
-
- def test__do_node_clean_automated_cache_bios_exception(self):
- self._test__do_node_clean_cache_bios(enable_exception=True)
-
- def test__do_node_clean_manual_cache_bios_unsupported(self):
- self._test__do_node_clean_cache_bios(clean_steps=[self.deploy_raid],
- enable_unsupported=True)
-
- def test__do_node_clean_automated_cache_bios_unsupported(self):
- self._test__do_node_clean_cache_bios(enable_unsupported=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- def test__do_node_clean_automated_disabled(self, mock_validate):
- self.config(automated_clean=False, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node was moved to available without cleaning
- self.assertFalse(mock_validate.called)
- self.assertEqual(states.AVAILABLE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_steps', node.driver_internal_info)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_automated_disabled_individual_enabled(
- self, mock_network, mock_validate):
- self.config(automated_clean=False, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None, automated_clean=True)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node clean was called
- self.assertTrue(mock_validate.called)
- self.assertIn('clean_steps', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- def test__do_node_clean_automated_disabled_individual_disabled(
- self, mock_validate):
- self.config(automated_clean=False, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None, automated_clean=False)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node was moved to available without cleaning
- self.assertFalse(mock_validate.called)
- self.assertEqual(states.AVAILABLE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_steps', node.driver_internal_info)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_automated_enabled(self, mock_validate,
- mock_network):
- self.config(automated_clean=True, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None,
- driver_internal_info={'agent_url': 'url'})
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node was cleaned
- self.assertTrue(mock_validate.called)
- self.assertIn('clean_steps', node.driver_internal_info)
- self.assertNotIn('agent_url', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_automated_enabled_individual_enabled(
- self, mock_network, mock_validate):
- self.config(automated_clean=True, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None, automated_clean=True)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node was cleaned
- self.assertTrue(mock_validate.called)
- self.assertIn('clean_steps', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- def test__do_node_clean_automated_enabled_individual_none(
- self, mock_validate, mock_network):
- self.config(automated_clean=True, group='conductor')
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- last_error=None, automated_clean=None)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- self._stop_service()
- node.refresh()
-
- # Assert that the node was cleaned
- self.assertTrue(mock_validate.called)
- self.assertIn('clean_steps', node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down_cleaning',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
- autospec=True)
- def test__do_node_clean_maintenance(self, mock_prep, mock_tear_down):
- CONF.set_override('allow_provisioning_in_maintenance', False,
- group='conductor')
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=states.AVAILABLE,
- maintenance=True,
- maintenance_reason='Original reason')
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task)
- node.refresh()
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(states.AVAILABLE, node.target_provision_state)
- self.assertIn('is not allowed', node.last_error)
- self.assertTrue(node.maintenance)
- self.assertEqual('Original reason', node.maintenance_reason)
- self.assertFalse(mock_prep.called)
- self.assertFalse(mock_tear_down.called)
-
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
- autospec=True)
- def __do_node_clean_prepare_clean_fail(self, mock_prep, mock_validate,
- clean_steps=None):
- # Exception from task.driver.deploy.prepare_cleaning should cause node
- # to go to CLEANFAIL
- mock_prep.side_effect = exception.InvalidParameterValue('error')
- tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
- node.refresh()
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- mock_prep.assert_called_once_with(mock.ANY, task)
- mock_validate.assert_called_once_with(mock.ANY, task)
-
- def test__do_node_clean_automated_prepare_clean_fail(self):
- self.__do_node_clean_prepare_clean_fail()
-
- def test__do_node_clean_manual_prepare_clean_fail(self):
- self.__do_node_clean_prepare_clean_fail(clean_steps=[self.deploy_raid])
-
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare_cleaning',
- autospec=True)
- def __do_node_clean_prepare_clean_wait(self, mock_prep, mock_validate,
- clean_steps=None):
- mock_prep.return_value = states.CLEANWAIT
- tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
- node.refresh()
- self.assertEqual(states.CLEANWAIT, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- mock_prep.assert_called_once_with(mock.ANY, mock.ANY)
- mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
-
- def test__do_node_clean_automated_prepare_clean_wait(self):
- self.__do_node_clean_prepare_clean_wait()
-
- def test__do_node_clean_manual_prepare_clean_wait(self):
- self.__do_node_clean_prepare_clean_wait(clean_steps=[self.deploy_raid])
-
- @mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
- @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
- autospec=True)
- def __do_node_clean_steps_fail(self, mock_steps, mock_validate,
- clean_steps=None, invalid_exc=True):
- if invalid_exc:
- mock_steps.side_effect = exception.InvalidParameterValue('invalid')
- else:
- mock_steps.side_effect = exception.NodeCleaningFailure('failure')
- tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- uuid=uuidutils.generate_uuid(),
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state)
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
- mock_validate.assert_called_once_with(mock.ANY, task)
- node.refresh()
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- mock_steps.assert_called_once_with(mock.ANY)
-
- def test__do_node_clean_automated_steps_fail(self):
- for invalid in (True, False):
- self.__do_node_clean_steps_fail(invalid_exc=invalid)
-
- def test__do_node_clean_manual_steps_fail(self):
- for invalid in (True, False):
- self.__do_node_clean_steps_fail(clean_steps=[self.deploy_raid],
- invalid_exc=invalid)
-
- @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
- autospec=True)
- @mock.patch('ironic.conductor.manager.ConductorManager.'
- '_do_next_clean_step', autospec=True)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
- autospec=True)
- def __do_node_clean(self, mock_power_valid, mock_network_valid,
- mock_next_step, mock_steps, clean_steps=None):
- if clean_steps:
- tgt_prov_state = states.MANAGEABLE
- driver_info = {}
- else:
- tgt_prov_state = states.AVAILABLE
- driver_info = {'clean_steps': self.clean_steps}
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- power_state=states.POWER_OFF,
- driver_internal_info=driver_info)
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_node_clean(task, clean_steps=clean_steps)
-
- self._stop_service()
- node.refresh()
-
- mock_power_valid.assert_called_once_with(mock.ANY, task)
- mock_network_valid.assert_called_once_with(mock.ANY, task)
- mock_next_step.assert_called_once_with(mock.ANY, task, 0)
- mock_steps.assert_called_once_with(task)
- if clean_steps:
- self.assertEqual(clean_steps,
- node.driver_internal_info['clean_steps'])
-
- # Check that state didn't change
- self.assertEqual(states.CLEANING, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
-
- def test__do_node_clean_automated(self):
- self.__do_node_clean()
-
- def test__do_node_clean_manual(self):
- self.__do_node_clean(clean_steps=[self.deploy_raid])
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_first_step_async(self, return_state, mock_execute,
- clean_steps=None):
- # Execute the first async clean step on a node
- driver_internal_info = {'clean_step_index': None}
- if clean_steps:
- tgt_prov_state = states.MANAGEABLE
- driver_internal_info['clean_steps'] = clean_steps
- else:
- tgt_prov_state = states.AVAILABLE
- driver_internal_info['clean_steps'] = self.clean_steps
-
- self._start_service()
-
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info=driver_internal_info,
- clean_step={})
- mock_execute.return_value = return_state
- expected_first_step = node.driver_internal_info['clean_steps'][0]
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
-
- self._stop_service()
- node.refresh()
-
- self.assertEqual(states.CLEANWAIT, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual(expected_first_step, node.clean_step)
- self.assertEqual(0, node.driver_internal_info['clean_step_index'])
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, expected_first_step)
-
- def test_do_next_clean_step_automated_first_step_async(self):
- self._do_next_clean_step_first_step_async(states.CLEANWAIT)
-
- def test_do_next_clean_step_manual_first_step_async(self):
- self._do_next_clean_step_first_step_async(
- states.CLEANWAIT, clean_steps=[self.deploy_raid])
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_continue_from_last_cleaning(self, return_state,
- mock_execute,
- manual=False):
- # Resume an in-progress cleaning after the first async step
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': 0},
- clean_step=self.clean_steps[0])
- mock_execute.return_value = return_state
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, self.next_clean_step_index)
-
- self._stop_service()
- node.refresh()
-
- self.assertEqual(states.CLEANWAIT, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual(self.clean_steps[1], node.clean_step)
- self.assertEqual(1, node.driver_internal_info['clean_step_index'])
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.clean_steps[1])
-
- def test_do_next_clean_step_continue_from_last_cleaning(self):
- self._do_next_clean_step_continue_from_last_cleaning(states.CLEANWAIT)
-
- def test_do_next_clean_step_manual_continue_from_last_cleaning(self):
- self._do_next_clean_step_continue_from_last_cleaning(states.CLEANWAIT,
- manual=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_last_step_noop(self, mock_execute, manual=False):
- # Resume where last_step is the last cleaning step, should be noop
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
- info = {'clean_steps': self.clean_steps,
- 'clean_step_index': len(self.clean_steps) - 1}
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info=info,
- clean_step=self.clean_steps[-1])
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, None)
-
- self._stop_service()
- node.refresh()
-
- # Cleaning should be complete without calling additional steps
- self.assertEqual(tgt_prov_state, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertIsNone(node.driver_internal_info['clean_steps'])
- self.assertFalse(mock_execute.called)
-
- def test__do_next_clean_step_automated_last_step_noop(self):
- self._do_next_clean_step_last_step_noop()
-
- def test__do_next_clean_step_manual_last_step_noop(self):
- self._do_next_clean_step_last_step_noop(manual=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_all(self, mock_deploy_execute,
- mock_power_execute, manual=False):
- # Run all steps from start to finish (all synchronous)
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None},
- clean_step={})
-
- def fake_deploy(conductor_obj, task, step):
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['goober'] = 'test'
- task.node.driver_internal_info = driver_internal_info
- task.node.save()
-
- mock_deploy_execute.side_effect = fake_deploy
- mock_power_execute.return_value = None
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
-
- self._stop_service()
- node.refresh()
-
- # Cleaning should be complete
- self.assertEqual(tgt_prov_state, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertEqual('test', node.driver_internal_info['goober'])
- self.assertIsNone(node.driver_internal_info['clean_steps'])
- mock_power_execute.assert_called_once_with(mock.ANY, mock.ANY,
- self.clean_steps[1])
- mock_deploy_execute.assert_has_calls(
- [mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
- mock.call(mock.ANY, mock.ANY, self.clean_steps[2])])
-
- def test_do_next_clean_step_automated_all(self):
- self._do_next_clean_step_all()
-
- def test_do_next_clean_step_manual_all(self):
- self._do_next_clean_step_all(manual=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def _do_next_clean_step_execute_fail(self, tear_mock, mock_execute,
- manual=False):
- # When a clean step fails, go to CLEANFAIL
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None},
- clean_step={})
- mock_execute.side_effect = Exception()
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
- tear_mock.assert_called_once_with(task.driver.deploy, task)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANFAIL, clear clean_steps
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertTrue(node.maintenance)
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.clean_steps[0])
-
- def test__do_next_clean_step_automated_execute_fail(self):
- self._do_next_clean_step_execute_fail()
-
- def test__do_next_clean_step_manual_execute_fail(self):
- self._do_next_clean_step_execute_fail(manual=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def test_do_next_clean_step_oob_reboot(self, mock_execute):
- # When a clean step fails, go to CLEANWAIT
- tgt_prov_state = states.MANAGEABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None,
- 'cleaning_reboot': True},
- clean_step={})
- mock_execute.side_effect = exception.AgentConnectionFailed(
- reason='failed')
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANWAIT
- self.assertEqual(states.CLEANWAIT, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual(self.clean_steps[0], node.clean_step)
- self.assertEqual(0, node.driver_internal_info['clean_step_index'])
- self.assertFalse(node.driver_internal_info['skip_current_clean_step'])
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.clean_steps[0])
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def test_do_next_clean_step_oob_reboot_last_step(self, mock_execute):
- # Resume where last_step is the last cleaning step
- tgt_prov_state = states.MANAGEABLE
- info = {'clean_steps': self.clean_steps,
- 'cleaning_reboot': True,
- 'clean_step_index': len(self.clean_steps) - 1}
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info=info,
- clean_step=self.clean_steps[-1])
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, None)
-
- self._stop_service()
- node.refresh()
-
- # Cleaning should be complete without calling additional steps
- self.assertEqual(tgt_prov_state, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertNotIn('cleaning_reboot', node.driver_internal_info)
- self.assertIsNone(node.driver_internal_info['clean_steps'])
- self.assertFalse(mock_execute.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def test_do_next_clean_step_oob_reboot_fail(self, tear_mock,
- mock_execute):
- # When a clean step fails with no reboot requested go to CLEANFAIL
- tgt_prov_state = states.MANAGEABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None},
- clean_step={})
- mock_execute.side_effect = exception.AgentConnectionFailed(
- reason='failed')
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
- tear_mock.assert_called_once_with(task.driver.deploy, task)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANFAIL, clear clean_steps
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertNotIn('skip_current_clean_step', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertTrue(node.maintenance)
- mock_execute.assert_called_once_with(
- mock.ANY, mock.ANY, self.clean_steps[0])
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
- autospec=True)
- @mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def _do_next_clean_step_fail_in_tear_down_cleaning(
- self, tear_mock, power_exec_mock, deploy_exec_mock, log_mock,
- manual=True):
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None},
- clean_step={})
-
- deploy_exec_mock.return_value = None
- power_exec_mock.return_value = None
- tear_mock.side_effect = Exception('boom')
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANFAIL, clear clean_steps
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertEqual(1, tear_mock.call_count)
- self.assertTrue(node.maintenance)
- deploy_exec_calls = [
- mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
- mock.call(mock.ANY, mock.ANY, self.clean_steps[2]),
- ]
- self.assertEqual(deploy_exec_calls, deploy_exec_mock.call_args_list)
-
- power_exec_calls = [
- mock.call(mock.ANY, mock.ANY, self.clean_steps[1]),
- ]
- self.assertEqual(power_exec_calls, power_exec_mock.call_args_list)
- log_mock.exception.assert_called_once_with(
- 'Failed to tear down from cleaning for node {}, reason: boom'
- .format(node.uuid))
-
- def test__do_next_clean_step_automated_fail_in_tear_down_cleaning(self):
- self._do_next_clean_step_fail_in_tear_down_cleaning()
-
- def test__do_next_clean_step_manual_fail_in_tear_down_cleaning(self):
- self._do_next_clean_step_fail_in_tear_down_cleaning(manual=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_no_steps(self, mock_execute, manual=False,
- fast_track=False):
- if fast_track:
- self.config(fast_track=True, group='deploy')
-
- for info in ({'clean_steps': None, 'clean_step_index': None,
- 'agent_url': 'test-url'},
- {'clean_steps': None, 'agent_url': 'test-url'}):
- # Resume where there are no steps, should be a noop
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- uuid=uuidutils.generate_uuid(),
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info=info,
- clean_step={})
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, None)
-
- self._stop_service()
- node.refresh()
-
- # Cleaning should be complete without calling additional steps
- self.assertEqual(tgt_prov_state, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertFalse(mock_execute.called)
- if fast_track:
- self.assertEqual('test-url',
- node.driver_internal_info.get('agent_url'))
- else:
- self.assertNotIn('agent_url', node.driver_internal_info)
- mock_execute.reset_mock()
-
- def test__do_next_clean_step_automated_no_steps(self):
- self._do_next_clean_step_no_steps()
-
- def test__do_next_clean_step_manual_no_steps(self):
- self._do_next_clean_step_no_steps(manual=True)
-
- def test__do_next_clean_step_fast_track(self):
- self._do_next_clean_step_no_steps(fast_track=True)
-
- @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
- autospec=True)
- def _do_next_clean_step_bad_step_return_value(
- self, deploy_exec_mock, power_exec_mock, manual=False):
- # When a clean step fails, go to CLEANFAIL
- tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
-
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANING,
- target_provision_state=tgt_prov_state,
- last_error=None,
- driver_internal_info={'clean_steps': self.clean_steps,
- 'clean_step_index': None},
- clean_step={})
- deploy_exec_mock.return_value = "foo"
-
- with task_manager.acquire(
- self.context, node.uuid, shared=False) as task:
- self.service._do_next_clean_step(task, 0)
-
- self._stop_service()
- node.refresh()
-
- # Make sure we go to CLEANFAIL, clear clean_steps
- self.assertEqual(states.CLEANFAIL, node.provision_state)
- self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertEqual({}, node.clean_step)
- self.assertNotIn('clean_step_index', node.driver_internal_info)
- self.assertIsNotNone(node.last_error)
- self.assertTrue(node.maintenance)
- deploy_exec_mock.assert_called_once_with(mock.ANY, mock.ANY,
- self.clean_steps[0])
- # Make sure we don't execute any other step and return
- self.assertFalse(power_exec_mock.called)
-
- def test__do_next_clean_step_automated_bad_step_return_value(self):
- self._do_next_clean_step_bad_step_return_value()
-
- def test__do_next_clean_step_manual_bad_step_return_value(self):
- self._do_next_clean_step_bad_step_return_value(manual=True)
-
- def __get_node_next_clean_steps(self, skip=True):
- driver_internal_info = {'clean_steps': self.clean_steps,
- 'clean_step_index': 0}
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANWAIT,
- target_provision_state=states.AVAILABLE,
- driver_internal_info=driver_internal_info,
- last_error=None,
- clean_step=self.clean_steps[0])
-
- with task_manager.acquire(self.context, node.uuid) as task:
- step_index = self.service._get_node_next_clean_steps(
- task, skip_current_step=skip)
- expected_index = 1 if skip else 0
- self.assertEqual(expected_index, step_index)
-
- def test__get_node_next_clean_steps(self):
- self.__get_node_next_clean_steps()
-
- def test__get_node_next_clean_steps_no_skip(self):
- self.__get_node_next_clean_steps(skip=False)
-
- def test__get_node_next_clean_steps_unset_clean_step(self):
- driver_internal_info = {'clean_steps': self.clean_steps,
- 'clean_step_index': None}
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.CLEANWAIT,
- target_provision_state=states.AVAILABLE,
- driver_internal_info=driver_internal_info,
- last_error=None,
- clean_step=None)
-
- with task_manager.acquire(self.context, node.uuid) as task:
- step_index = self.service._get_node_next_clean_steps(task)
- self.assertEqual(0, step_index)
-
class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
@@ -7867,105 +6004,6 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._do_takeover, self.task)
-@mock.patch.object(swift, 'SwiftAPI')
-class StoreConfigDriveTestCase(db_base.DbTestCase):
-
- def setUp(self):
- super(StoreConfigDriveTestCase, self).setUp()
- self.node = obj_utils.create_test_node(self.context,
- driver='fake-hardware',
- instance_info=None)
-
- def test_store_configdrive(self, mock_swift):
- manager._store_configdrive(self.node, 'foo')
- expected_instance_info = {'configdrive': 'foo'}
- self.node.refresh()
- self.assertEqual(expected_instance_info, self.node.instance_info)
- self.assertFalse(mock_swift.called)
-
- def test_store_configdrive_swift(self, mock_swift):
- container_name = 'foo_container'
- timeout = 123
- expected_obj_name = 'configdrive-%s' % self.node.uuid
- expected_obj_header = {'X-Delete-After': str(timeout)}
- expected_instance_info = {'configdrive': 'http://1.2.3.4'}
-
- # set configs and mocks
- CONF.set_override('configdrive_use_object_store', True,
- group='deploy')
- CONF.set_override('configdrive_swift_container', container_name,
- group='conductor')
- CONF.set_override('deploy_callback_timeout', timeout,
- group='conductor')
- mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
-
- manager._store_configdrive(self.node, b'foo')
-
- mock_swift.assert_called_once_with()
- mock_swift.return_value.create_object.assert_called_once_with(
- container_name, expected_obj_name, mock.ANY,
- object_headers=expected_obj_header)
- mock_swift.return_value.get_temp_url.assert_called_once_with(
- container_name, expected_obj_name, timeout)
- self.node.refresh()
- self.assertEqual(expected_instance_info, self.node.instance_info)
-
- def test_store_configdrive_swift_no_deploy_timeout(self, mock_swift):
- container_name = 'foo_container'
- expected_obj_name = 'configdrive-%s' % self.node.uuid
- expected_obj_header = {'X-Delete-After': '1200'}
- expected_instance_info = {'configdrive': 'http://1.2.3.4'}
-
- # set configs and mocks
- CONF.set_override('configdrive_use_object_store', True,
- group='deploy')
- CONF.set_override('configdrive_swift_container', container_name,
- group='conductor')
- CONF.set_override('configdrive_swift_temp_url_duration', 1200,
- group='conductor')
- CONF.set_override('deploy_callback_timeout', 0,
- group='conductor')
- mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
-
- manager._store_configdrive(self.node, b'foo')
-
- mock_swift.assert_called_once_with()
- mock_swift.return_value.create_object.assert_called_once_with(
- container_name, expected_obj_name, mock.ANY,
- object_headers=expected_obj_header)
- mock_swift.return_value.get_temp_url.assert_called_once_with(
- container_name, expected_obj_name, 1200)
- self.node.refresh()
- self.assertEqual(expected_instance_info, self.node.instance_info)
-
- def test_store_configdrive_swift_no_deploy_timeout_fallback(self,
- mock_swift):
- container_name = 'foo_container'
- expected_obj_name = 'configdrive-%s' % self.node.uuid
- expected_obj_header = {'X-Delete-After': '1800'}
- expected_instance_info = {'configdrive': 'http://1.2.3.4'}
-
- # set configs and mocks
- CONF.set_override('configdrive_use_object_store', True,
- group='deploy')
- CONF.set_override('configdrive_swift_container', container_name,
- group='conductor')
- CONF.set_override('deploy_callback_timeout', 0,
- group='conductor')
- mock_swift.return_value.get_temp_url.return_value = 'http://1.2.3.4'
-
- manager._store_configdrive(self.node, b'foo')
-
- mock_swift.assert_called_once_with()
- mock_swift.return_value.create_object.assert_called_once_with(
- container_name, expected_obj_name, mock.ANY,
- object_headers=expected_obj_header)
- mock_swift.return_value.get_temp_url.assert_called_once_with(
- container_name, expected_obj_name, 1800)
- self.node.refresh()
- self.assertEqual(expected_instance_info, self.node.instance_info)
-
-
@mgr_utils.mock_record_keepalive
class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
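
The relocated configdrive tests above pin down the expected storage behaviour:
without the object store the configdrive is stashed directly in
``instance_info``, while with it the payload is uploaded to Swift with an
``X-Delete-After`` header and exposed via a temp URL whose lifetime prefers
``configdrive_swift_temp_url_duration``, then ``deploy_callback_timeout``,
then 1800 seconds. A minimal sketch of that flow, assuming the simplified
handling below (not the code actually being moved into
ironic/conductor/deployments.py):

    from ironic.common import swift
    from ironic.conf import CONF

    def _store_configdrive(node, configdrive):
        if CONF.deploy.configdrive_use_object_store:
            timeout = (CONF.conductor.configdrive_swift_temp_url_duration
                       or CONF.conductor.deploy_callback_timeout or 1800)
            container = CONF.conductor.configdrive_swift_container
            obj_name = 'configdrive-%s' % node.uuid
            api = swift.SwiftAPI()
            # The real helper stages the payload in a temporary file before
            # uploading; that detail is elided in this sketch.
            api.create_object(container, obj_name, configdrive,
                              object_headers={'X-Delete-After': str(timeout)})
            configdrive = api.get_temp_url(container, obj_name, timeout)
        i_info = node.instance_info or {}
        i_info['configdrive'] = configdrive
        node.instance_info = i_info
        node.save()
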
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index 86d51b705..482387ddf 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -1915,3 +1915,106 @@ class FastTrackTestCase(db_base.DbTestCase):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertFalse(conductor_utils.is_fast_track(task))
+
+
+class GetNodeNextStepsTestCase(db_base.DbTestCase):
+ def setUp(self):
+ super(GetNodeNextStepsTestCase, self).setUp()
+ self.power_update = {
+ 'step': 'update_firmware', 'priority': 10, 'interface': 'power'}
+ self.deploy_update = {
+ 'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'}
+ self.deploy_erase = {
+ 'step': 'erase_disks', 'priority': 20, 'interface': 'deploy'}
+ # Automated cleaning should be executed in this order
+ self.clean_steps = [self.deploy_erase, self.power_update,
+ self.deploy_update]
+ self.deploy_start = {
+ 'step': 'deploy_start', 'priority': 50, 'interface': 'deploy'}
+ self.deploy_end = {
+ 'step': 'deploy_end', 'priority': 20, 'interface': 'deploy'}
+ self.deploy_steps = [self.deploy_start, self.deploy_end]
+
+ def _test_get_node_next_deploy_steps(self, skip=True):
+ driver_internal_info = {'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': 0}
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYWAIT,
+ target_provision_state=states.ACTIVE,
+ driver_internal_info=driver_internal_info,
+ last_error=None,
+ deploy_step=self.deploy_steps[0])
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ step_index = conductor_utils.get_node_next_deploy_steps(
+ task, skip_current_step=skip)
+ expected_index = 1 if skip else 0
+ self.assertEqual(expected_index, step_index)
+
+ def test_get_node_next_deploy_steps(self):
+ self._test_get_node_next_deploy_steps()
+
+ def test_get_node_next_deploy_steps_no_skip(self):
+ self._test_get_node_next_deploy_steps(skip=False)
+
+ def test_get_node_next_deploy_steps_unset_deploy_step(self):
+ driver_internal_info = {'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': None}
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.DEPLOYWAIT,
+ target_provision_state=states.ACTIVE,
+ driver_internal_info=driver_internal_info,
+ last_error=None,
+ deploy_step=None)
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ step_index = conductor_utils.get_node_next_deploy_steps(task)
+ self.assertEqual(0, step_index)
+
+ def test_get_node_next_steps_exception(self):
+ node = obj_utils.create_test_node(self.context)
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ self.assertRaises(exception.Invalid,
+ conductor_utils._get_node_next_steps,
+ task, 'foo')
+
+ def _test_get_node_next_clean_steps(self, skip=True):
+ driver_internal_info = {'clean_steps': self.clean_steps,
+ 'clean_step_index': 0}
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANWAIT,
+ target_provision_state=states.AVAILABLE,
+ driver_internal_info=driver_internal_info,
+ last_error=None,
+ clean_step=self.clean_steps[0])
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ step_index = conductor_utils.get_node_next_clean_steps(
+ task, skip_current_step=skip)
+ expected_index = 1 if skip else 0
+ self.assertEqual(expected_index, step_index)
+
+ def test_get_node_next_clean_steps(self):
+ self._test_get_node_next_clean_steps()
+
+ def test_get_node_next_clean_steps_no_skip(self):
+ self._test_get_node_next_clean_steps(skip=False)
+
+ def test_get_node_next_clean_steps_unset_clean_step(self):
+ driver_internal_info = {'clean_steps': self.clean_steps,
+ 'clean_step_index': None}
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANWAIT,
+ target_provision_state=states.AVAILABLE,
+ driver_internal_info=driver_internal_info,
+ last_error=None,
+ clean_step=None)
+
+ with task_manager.acquire(self.context, node.uuid) as task:
+ step_index = conductor_utils.get_node_next_clean_steps(task)
+ self.assertEqual(0, step_index)
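
The step-index tests above describe the contract for the helpers in
ironic/conductor/utils.py: start at index 0 when nothing has run yet, and
otherwise either re-run or skip the current step. A minimal sketch of that
behaviour, assuming the helper simply walks ``driver_internal_info`` (the
body below is illustrative, not the actual implementation):

    from ironic.common import exception

    def _get_node_next_steps(task, step_type, skip_current_step=True):
        if step_type not in ('clean', 'deploy'):
            raise exception.Invalid(
                "step_type must be 'clean' or 'deploy', got %s" % step_type)
        info = task.node.driver_internal_info
        steps = info.get('%s_steps' % step_type) or []
        current = info.get('%s_step_index' % step_type)
        if current is None:
            # Nothing has run yet: start from the first step, if any.
            return 0 if steps else None
        next_index = current + 1 if skip_current_step else current
        return next_index if next_index < len(steps) else None

get_node_next_clean_steps() and get_node_next_deploy_steps() would then be
thin wrappers passing 'clean' or 'deploy' respectively, which matches the
indexes (0 or 1) the assertions above expect.
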
diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
index 715ec15c3..76b575711 100644
--- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
@@ -969,6 +969,27 @@ class MigrationCheckersMixin(object):
col_names = [column.name for column in allocations.c]
self.assertIn('owner', col_names)
+ def _pre_upgrade_cd2c80feb331(self, engine):
+ data = {
+ 'node_uuid': uuidutils.generate_uuid(),
+ }
+
+ nodes = db_utils.get_table(engine, 'nodes')
+ nodes.insert().execute({'uuid': data['node_uuid']})
+
+ return data
+
+ def _check_cd2c80feb331(self, engine, data):
+ nodes = db_utils.get_table(engine, 'nodes')
+ col_names = [column.name for column in nodes.c]
+ self.assertIn('retired', col_names)
+ self.assertIn('retired_reason', col_names)
+
+ node = nodes.select(
+ nodes.c.uuid == data['node_uuid']).execute().first()
+ self.assertFalse(node['retired'])
+ self.assertIsNone(node['retired_reason'])
+
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('head')
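
The checker pair above targets the cd2c80feb331 revision. An illustrative
sketch of what such an upgrade step would add, assuming nullable columns so
that pre-existing rows read back as falsy/None (the real script under
ironic/db/sqlalchemy/alembic/versions/ is authoritative, and its parent
revision is omitted here):

    from alembic import op
    import sqlalchemy as sa

    revision = 'cd2c80feb331'
    down_revision = '...'  # parent revision omitted in this sketch


    def upgrade():
        op.add_column('nodes', sa.Column('retired', sa.Boolean(),
                                         nullable=True, default=False))
        op.add_column('nodes', sa.Column('retired_reason', sa.Text(),
                                         nullable=True))
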
diff --git a/ironic/tests/unit/db/test_ports.py b/ironic/tests/unit/db/test_ports.py
index fa12c4c25..e0d2e1d66 100644
--- a/ironic/tests/unit/db/test_ports.py
+++ b/ironic/tests/unit/db/test_ports.py
@@ -28,7 +28,7 @@ class DbPortTestCase(base.DbTestCase):
# This method creates a port for every test and
# replaces a test for creating a port.
super(DbPortTestCase, self).setUp()
- self.node = db_utils.create_test_node()
+ self.node = db_utils.create_test_node(owner='12345')
self.portgroup = db_utils.create_test_portgroup(node_id=self.node.id)
self.port = db_utils.create_test_port(node_id=self.node.id,
portgroup_id=self.portgroup.id)
@@ -45,6 +45,17 @@ class DbPortTestCase(base.DbTestCase):
res = self.dbapi.get_port_by_address(self.port.address)
self.assertEqual(self.port.id, res.id)
+ def test_get_port_by_address_filter_by_owner(self):
+ res = self.dbapi.get_port_by_address(self.port.address,
+ owner=self.node.owner)
+ self.assertEqual(self.port.id, res.id)
+
+ def test_get_port_by_address_filter_by_owner_no_match(self):
+ self.assertRaises(exception.PortNotFound,
+ self.dbapi.get_port_by_address,
+ self.port.address,
+ owner='54321')
+
def test_get_port_list(self):
uuids = []
for i in range(1, 6):
@@ -72,10 +83,36 @@ class DbPortTestCase(base.DbTestCase):
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.get_port_list, sort_key='foo')
+ def test_get_port_list_filter_by_node_owner(self):
+ uuids = []
+ for i in range(1, 3):
+ port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:4%s' % i)
+ for i in range(4, 6):
+ port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
+ address='52:54:00:cf:2d:4%s' % i)
+ uuids.append(str(port.uuid))
+ # Also add the uuid for the port created in setUp()
+ uuids.append(str(self.port.uuid))
+ res = self.dbapi.get_port_list(owner=self.node.owner)
+ res_uuids = [r.uuid for r in res]
+ self.assertCountEqual(uuids, res_uuids)
+
def test_get_ports_by_node_id(self):
res = self.dbapi.get_ports_by_node_id(self.node.id)
self.assertEqual(self.port.address, res[0].address)
+ def test_get_ports_by_node_id_filter_by_node_owner(self):
+ res = self.dbapi.get_ports_by_node_id(self.node.id,
+ owner=self.node.owner)
+ self.assertEqual(self.port.address, res[0].address)
+
+ def test_get_ports_by_node_id_filter_by_node_owner_no_match(self):
+ res = self.dbapi.get_ports_by_node_id(self.node.id,
+ owner='54321')
+ self.assertEqual([], res)
+
def test_get_ports_by_node_id_that_does_not_exist(self):
self.assertEqual([], self.dbapi.get_ports_by_node_id(99))
@@ -83,6 +120,16 @@ class DbPortTestCase(base.DbTestCase):
res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id)
self.assertEqual(self.port.address, res[0].address)
+ def test_get_ports_by_portgroup_id_filter_by_node_owner(self):
+ res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
+ owner=self.node.owner)
+ self.assertEqual(self.port.address, res[0].address)
+
+ def test_get_ports_by_portgroup_id_filter_by_node_owner_no_match(self):
+ res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
+ owner='54321')
+ self.assertEqual([], res)
+
def test_get_ports_by_portgroup_id_that_does_not_exist(self):
self.assertEqual([], self.dbapi.get_ports_by_portgroup_id(99))
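
The new owner filters exercised above suggest the DB layer joins ports to
their parent node and restricts on the node's owner whenever one is supplied.
A hedged sketch of that filter (the helper name is made up; the real queries
live in ironic/db/sqlalchemy/api.py):

    from ironic.db.sqlalchemy import models

    def _filter_ports_by_owner(query, owner=None):
        # Only restrict the query when an owner is actually supplied.
        if owner:
            query = query.join(models.Node,
                               models.Port.node_id == models.Node.id)
            query = query.filter(models.Node.owner == owner)
        return query
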
diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py
index 81a46d89a..e8a4c45e3 100644
--- a/ironic/tests/unit/db/utils.py
+++ b/ironic/tests/unit/db/utils.py
@@ -224,6 +224,9 @@ def get_test_node(**kw):
'owner': kw.get('owner', None),
'allocation_id': kw.get('allocation_id'),
'description': kw.get('description'),
+ 'retired': kw.get('retired', False),
+ 'retired_reason': kw.get('retired_reason', None),
+
}
for iface in drivers_base.ALL_INTERFACES:
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index 7f448df30..9624286b5 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -1255,6 +1255,31 @@ class OtherFunctionTestCase(db_base.DbTestCase):
result = utils.get_boot_option(self.node)
self.assertEqual("netboot", result)
+ @mock.patch.object(utils, 'is_software_raid', autospec=True)
+ def test_get_boot_option_software_raid(self, mock_is_software_raid):
+ mock_is_software_raid.return_value = True
+ cfg.CONF.set_override('default_boot_option', 'netboot', 'deploy')
+ result = utils.get_boot_option(self.node)
+ self.assertEqual("local", result)
+
+ def test_is_software_raid(self):
+ self.node.target_raid_config = {
+ "logical_disks": [
+ {
+ "size_gb": 100,
+ "raid_level": "1",
+ "controller": "software",
+ }
+ ]
+ }
+ result = utils.is_software_raid(self.node)
+ self.assertTrue(result)
+
+ def test_is_software_raid_false(self):
+ self.node.target_raid_config = {}
+ result = utils.is_software_raid(self.node)
+ self.assertFalse(result)
+
@mock.patch.object(image_cache, 'clean_up_caches', autospec=True)
def test_fetch_images(self, mock_clean_up_caches):
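
The software RAID tests above capture the intended short-circuit: any logical
disk whose controller is ``software`` forces a local boot loader, regardless
of the configured default. A simplified, self-contained sketch of that logic
(the real get_boot_option also consults instance capabilities, elided here):

    from types import SimpleNamespace

    def is_software_raid(node):
        disks = (node.target_raid_config or {}).get('logical_disks', [])
        return any(d.get('controller') == 'software' for d in disks)

    def get_boot_option(node, default='netboot'):
        # Software RAID needs a boot loader installed on the array itself.
        return 'local' if is_software_raid(node) else default

    node = SimpleNamespace(target_raid_config={'logical_disks': [
        {'size_gb': 100, 'raid_level': '1', 'controller': 'software'}]})
    assert is_software_raid(node)
    assert get_boot_option(node) == 'local'
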
diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py
index 1f0988c2a..07d4cdae7 100644
--- a/ironic/tests/unit/objects/test_node.py
+++ b/ironic/tests/unit/objects/test_node.py
@@ -886,6 +886,64 @@ class TestConvertToVersion(db_base.DbTestCase):
self.assertEqual({'protected': False, 'protected_reason': None},
node.obj_get_changes())
+ def test_retired_supported_missing(self):
+        # retired and retired_reason not set, should be set to defaults.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+ delattr(node, 'retired')
+ delattr(node, 'retired_reason')
+ node.obj_reset_changes()
+ node._convert_to_version("1.33")
+ self.assertFalse(node.retired)
+ self.assertIsNone(node.retired_reason)
+ self.assertEqual({'retired': False, 'retired_reason': None},
+ node.obj_get_changes())
+
+ def test_retired_supported_set(self):
+ # retired set, no change required.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.retired = True
+ node.retired_reason = 'a reason'
+ node.obj_reset_changes()
+ node._convert_to_version("1.33")
+ self.assertTrue(node.retired)
+ self.assertEqual('a reason', node.retired_reason)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_retired_unsupported_missing(self):
+ # retired not set, no change required.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ delattr(node, 'retired')
+ delattr(node, 'retired_reason')
+ node.obj_reset_changes()
+ node._convert_to_version("1.32")
+ self.assertNotIn('retired', node)
+ self.assertNotIn('retired_reason', node)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_retired_unsupported_set_remove(self):
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.retired = True
+ node.retired_reason = 'another reason'
+ node.obj_reset_changes()
+ node._convert_to_version("1.32")
+ self.assertNotIn('retired', node)
+ self.assertNotIn('retired_reason', node)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_retired_unsupported_set_no_remove_non_default(self):
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.retired = True
+ node.retired_reason = 'yet another reason'
+ node.obj_reset_changes()
+ node._convert_to_version("1.32", False)
+        self.assertFalse(node.retired)
+ self.assertEqual({'retired': False, 'retired_reason': None},
+ node.obj_get_changes())
+
def test_owner_supported_missing(self):
# owner_interface not set, should be set to default.
node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
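
Taken together, the retired conversion tests describe the usual three-way
contract for new node fields: fill in defaults when converting up to 1.33,
and either drop the fields or reset them to defaults when converting down to
1.32. A rough sketch of that branch, assuming a simplified (major, minor)
version tuple (the real logic lives in Node._convert_to_version and covers
many more fields):

    def _convert_retired(node, target_version, remove_unavailable_fields=True):
        retired_set = node.obj_attr_is_set('retired')
        if target_version >= (1, 33):
            if not retired_set:
                node.retired = False
                node.retired_reason = None
        elif retired_set:
            if remove_unavailable_fields:
                # The target version cannot carry the fields: drop them.
                delattr(node, 'retired')
                node.obj_reset_changes(['retired'])
                delattr(node, 'retired_reason')
                node.obj_reset_changes(['retired_reason'])
            else:
                # Keep the attributes but force them back to their defaults.
                node.retired = False
                node.retired_reason = None
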
diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py
index 33ae36d6d..457c8be3c 100644
--- a/ironic/tests/unit/objects/test_objects.py
+++ b/ironic/tests/unit/objects/test_objects.py
@@ -676,7 +676,7 @@ class TestObject(_LocalTest, _TestObject):
# version bump. It is an MD5 hash of the object fields and remotable methods.
# The fingerprint values should only be changed if there is a version bump.
expected_object_fingerprints = {
- 'Node': '1.32-525750e76f07b62142ed5297334b7832',
+ 'Node': '1.33-d6a8ba8dd3be3b2bbad0e0a5b9887aa8',
'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6',
'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905',
'Port': '1.9-0cb9202a4ec442e8c0d87a324155eaaf',
@@ -684,21 +684,21 @@ expected_object_fingerprints = {
'Conductor': '1.3-d3f53e853b4d58cae5bfbd9a8341af4a',
'EventType': '1.1-aa2ba1afd38553e3880c267404e8d370',
'NotificationPublisher': '1.0-51a09397d6c0687771fb5be9a999605d',
- 'NodePayload': '1.13-18a34d461ef7d5dbc1c3e5a55fcb867a',
+ 'NodePayload': '1.14-8b2dfc37d800f268d29a580ac034e2c6',
'NodeSetPowerStateNotification': '1.0-59acc533c11d306f149846f922739c15',
- 'NodeSetPowerStatePayload': '1.13-4f96e52568e058e3fd6ffc9b0cf15764',
+ 'NodeSetPowerStatePayload': '1.14-dcd4d7911717ba323ab4c3297b92c31c',
'NodeCorrectedPowerStateNotification':
'1.0-59acc533c11d306f149846f922739c15',
- 'NodeCorrectedPowerStatePayload': '1.13-929af354e7c3474520ce6162ee794717',
+ 'NodeCorrectedPowerStatePayload': '1.14-c7d20e953bbb9a1a4ce31ce22068e4bf',
'NodeSetProvisionStateNotification':
'1.0-59acc533c11d306f149846f922739c15',
- 'NodeSetProvisionStatePayload': '1.13-fa15d2954961d8edcaba9d737a1cad91',
+ 'NodeSetProvisionStatePayload': '1.14-6d4145044a98c5cc80a40d69bbd98f61',
'VolumeConnector': '1.0-3e0252c0ab6e6b9d158d09238a577d97',
'VolumeTarget': '1.0-0b10d663d8dae675900b2c7548f76f5e',
'ChassisCRUDNotification': '1.0-59acc533c11d306f149846f922739c15',
'ChassisCRUDPayload': '1.0-dce63895d8186279a7dd577cffccb202',
'NodeCRUDNotification': '1.0-59acc533c11d306f149846f922739c15',
- 'NodeCRUDPayload': '1.11-f1c6a6b099e8e28f55378c448c033de0',
+ 'NodeCRUDPayload': '1.12-3f63cdace5159785535049025ddf6a5c',
'PortCRUDNotification': '1.0-59acc533c11d306f149846f922739c15',
'PortCRUDPayload': '1.3-21235916ed54a91b2a122f59571194e7',
'NodeMaintenanceNotification': '1.0-59acc533c11d306f149846f922739c15',
diff --git a/ironic/tests/unit/objects/test_port.py b/ironic/tests/unit/objects/test_port.py
index 19b15faf3..32df1e52a 100644
--- a/ironic/tests/unit/objects/test_port.py
+++ b/ironic/tests/unit/objects/test_port.py
@@ -66,7 +66,7 @@ class TestPortObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
port = objects.Port.get(self.context, address)
- mock_get_port.assert_called_once_with(address)
+ mock_get_port.assert_called_once_with(address, owner=None)
self.assertEqual(self.context, port._context)
def test_get_bad_id_and_uuid_and_address(self):
diff --git a/releasenotes/notes/add_retirement_support-23c5fed7ce8f97d4.yaml b/releasenotes/notes/add_retirement_support-23c5fed7ce8f97d4.yaml
new file mode 100644
index 000000000..8165df1d9
--- /dev/null
+++ b/releasenotes/notes/add_retirement_support-23c5fed7ce8f97d4.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+    Adds support for node retirement by adding a ``retired`` property to the
+    node. If it is set, a node moves to ``manageable`` (rather than
+    ``available``) upon automated cleaning. The new property also blocks the
+    ``provide`` verb, i.e. retired nodes cannot move from ``manageable`` to
+    ``available``. In addition, there is an optional ``retired_reason``
+    property to store the reason for the node's retirement.
\ No newline at end of file
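
For operators, the new fields are ordinary node properties and can be set
through the usual JSON-PATCH node update. A hypothetical request (endpoint,
node UUID and authentication are placeholders, and 1.61 is assumed to be the
microversion that exposes the fields):

    import json
    import requests

    patch = [
        {'op': 'replace', 'path': '/retired', 'value': True},
        {'op': 'replace', 'path': '/retired_reason',
         'value': 'hardware out of warranty'},
    ]
    requests.patch('http://ironic.example.com:6385/v1/nodes/<node-uuid>',
                   headers={'X-OpenStack-Ironic-API-Version': '1.61',
                            'Content-Type': 'application/json'},
                   data=json.dumps(patch))
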
diff --git a/releasenotes/notes/fixes-get-boot-option-for-software-raid-baa2cffd95e1f624.yaml b/releasenotes/notes/fixes-get-boot-option-for-software-raid-baa2cffd95e1f624.yaml
new file mode 100644
index 000000000..849273ac8
--- /dev/null
+++ b/releasenotes/notes/fixes-get-boot-option-for-software-raid-baa2cffd95e1f624.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes a minor issue with ``get_boot_option`` logic that did not account
+    for Software RAID, which could erroneously cause the deployment to take
+    the incorrect deployment path and attempt to install a boot loader.
diff --git a/releasenotes/notes/node-owner-policy-ports-1d3193fd897feaa6.yaml b/releasenotes/notes/node-owner-policy-ports-1d3193fd897feaa6.yaml
new file mode 100644
index 000000000..49a984dd0
--- /dev/null
+++ b/releasenotes/notes/node-owner-policy-ports-1d3193fd897feaa6.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A port is owned by its associated node's owner. This owner is now exposed
+ to policy checks, giving Ironic admins the option of modifying the policy
+ file to allow users specified by a node's owner field to perform API
+ actions on that node's associated ports through the ``is_node_owner``
+ rule.
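
The ``is_node_owner`` rule mentioned here is an oslo.policy rule keyed on the
node's owner. A hedged sketch of how such a rule is registered (the check
string and description are assumptions based on ironic's usual conventions):

    from oslo_policy import policy

    rules = [
        policy.RuleDefault('is_node_owner',
                           'project_id:%(node.owner)s',
                           description='Owner of node'),
    ]

Deployers would then opt in by referencing ``rule:is_node_owner`` from the
relevant ``baremetal:port:*`` policies in their policy file.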