Diffstat (limited to 'ironic')
-rw-r--r--  ironic/api/controllers/v1/bios.py | 8
-rw-r--r--  ironic/api/controllers/v1/node.py | 106
-rw-r--r--  ironic/api/controllers/v1/port.py | 45
-rw-r--r--  ironic/api/controllers/v1/portgroup.py | 91
-rw-r--r--  ironic/api/controllers/v1/utils.py | 270
-rw-r--r--  ironic/api/controllers/v1/volume_connector.py | 61
-rw-r--r--  ironic/api/controllers/v1/volume_target.py | 87
-rw-r--r--  ironic/common/driver_factory.py | 27
-rw-r--r--  ironic/common/policy.py | 523
-rw-r--r--  ironic/common/pxe_utils.py | 21
-rw-r--r--  ironic/conductor/task_manager.py | 72
-rw-r--r--  ironic/conf/ilo.py | 9
-rw-r--r--  ironic/conf/redfish.py | 10
-rw-r--r--  ironic/db/api.py | 31
-rw-r--r--  ironic/db/sqlalchemy/api.py | 67
-rw-r--r--  ironic/drivers/modules/drac/inspect.py | 3
-rw-r--r--  ironic/drivers/modules/ilo/boot.py | 16
-rw-r--r--  ironic/drivers/modules/image_utils.py | 94
-rw-r--r--  ironic/drivers/modules/redfish/raid.py | 1119
-rw-r--r--  ironic/drivers/modules/redfish/utils.py | 18
-rw-r--r--  ironic/drivers/redfish.py | 7
-rw-r--r--  ironic/objects/portgroup.py | 18
-rw-r--r--  ironic/objects/volume_connector.py | 13
-rw-r--r--  ironic/objects/volume_target.py | 19
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_node.py | 12
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_utils.py | 39
-rw-r--r--  ironic/tests/unit/api/test_acl.py | 120
-rw-r--r--  ironic/tests/unit/api/test_acl_basic.yaml | 4
-rw-r--r--  ironic/tests/unit/api/test_rbac_legacy.yaml | 118
-rw-r--r--  ironic/tests/unit/api/test_rbac_project_scoped.yaml | 874
-rw-r--r--  ironic/tests/unit/api/test_rbac_system_scoped.yaml | 44
-rw-r--r--  ironic/tests/unit/common/test_driver_factory.py | 31
-rw-r--r--  ironic/tests/unit/common/test_pxe_utils.py | 46
-rw-r--r--  ironic/tests/unit/conductor/test_manager.py | 2
-rw-r--r--  ironic/tests/unit/conductor/test_task_manager.py | 128
-rw-r--r--  ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template | 56
-rw-r--r--  ironic/tests/unit/drivers/modules/drac/test_inspect.py | 54
-rw-r--r--  ironic/tests/unit/drivers/modules/ilo/test_boot.py | 26
-rw-r--r--  ironic/tests/unit/drivers/modules/irmc/test_inspect.py | 17
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_raid.py | 846
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_utils.py | 19
-rw-r--r--  ironic/tests/unit/drivers/modules/test_image_utils.py | 64
-rw-r--r--  ironic/tests/unit/drivers/third_party_driver_mock_specs.py | 2
-rw-r--r--  ironic/tests/unit/drivers/third_party_driver_mocks.py | 2
-rw-r--r--  ironic/tests/unit/objects/test_portgroup.py | 2
-rw-r--r--  ironic/tests/unit/objects/test_volume_connector.py | 9
-rw-r--r--  ironic/tests/unit/objects/test_volume_target.py | 11
47 files changed, 4332 insertions(+), 929 deletions(-)
diff --git a/ironic/api/controllers/v1/bios.py b/ironic/api/controllers/v1/bios.py
index fd35689e2..05fc46d8a 100644
--- a/ironic/api/controllers/v1/bios.py
+++ b/ironic/api/controllers/v1/bios.py
@@ -56,9 +56,9 @@ class NodeBiosController(rest.RestController):
@method.expose()
def get_all(self):
"""List node bios settings."""
- api_utils.check_policy('baremetal:node:bios:get')
+ node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:bios:get', self.node_ident)
- node = api_utils.get_rpc_node(self.node_ident)
settings = objects.BIOSSettingList.get_by_node_id(
api.request.context, node.id)
return collection_from_list(self.node_ident, settings)
@@ -71,9 +71,9 @@ class NodeBiosController(rest.RestController):
:param setting_name: Logical name of the setting to retrieve.
"""
- api_utils.check_policy('baremetal:node:bios:get')
+ node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:bios:get', self.node_ident)
- node = api_utils.get_rpc_node(self.node_ident)
try:
setting = objects.BIOSSetting.get(api.request.context, node.id,
setting_name)
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index de87d566d..be5f0106d 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -47,6 +47,7 @@ from ironic.common import policy
from ironic.common import states as ir_states
from ironic.conductor import steps as conductor_steps
import ironic.conf
+from ironic.drivers import base as driver_base
from ironic import objects
@@ -513,9 +514,9 @@ class IndicatorController(rest.RestController):
mod:`ironic.common.indicator_states`.
"""
- api_utils.check_policy('baremetal:node:set_indicator_state')
-
- rpc_node = api_utils.get_rpc_node(node_ident)
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:set_indicator_state',
+ node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
indicator_at_component = IndicatorAtComponent(unique_name=indicator)
pecan.request.rpcapi.set_indicator_state(
@@ -535,9 +536,9 @@ class IndicatorController(rest.RestController):
:returns: a dict with the "state" key and one of
mod:`ironic.common.indicator_states` as a value.
"""
- api_utils.check_policy('baremetal:node:get_indicator_state')
-
- rpc_node = api_utils.get_rpc_node(node_ident)
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:get_indicator_state',
+ node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
indicator_at_component = IndicatorAtComponent(unique_name=indicator)
state = pecan.request.rpcapi.get_indicator_state(
@@ -558,9 +559,9 @@ class IndicatorController(rest.RestController):
(from `get_supported_indicators`) as values.
"""
- api_utils.check_policy('baremetal:node:get_indicator_state')
-
- rpc_node = api_utils.get_rpc_node(node_ident)
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:get_indicator_state',
+ node_ident)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
indicators = pecan.request.rpcapi.get_supported_indicators(
pecan.request.context, rpc_node.uuid, topic=topic)
@@ -1324,13 +1325,64 @@ def node_sanitize(node, fields):
:type fields: list of str
"""
cdict = api.request.context.to_policy_values()
+ target_dict = dict(cdict)
+ owner = node.get('owner')
+ lessee = node.get('lessee')
+
+ if owner:
+ target_dict['node.owner'] = owner
+ if lessee:
+ target_dict['node.lessee'] = lessee
+
# NOTE(tenbrae): the 'show_password' policy setting name exists for
# legacy purposes and can not be changed. Changing it will
# cause upgrade problems for any operators who have
# customized the value of this field
- show_driver_secrets = policy.check("show_password", cdict, cdict)
+ # NOTE(TheJulia): These methods use policy.check and normally return
+ # False in a noauth or password auth based situation, because the
+ # effective caller doesn't match the policy check rule.
+ show_driver_secrets = policy.check("show_password", cdict, target_dict)
show_instance_secrets = policy.check("show_instance_secrets",
- cdict, cdict)
+ cdict, target_dict)
+ # TODO(TheJulia): The above checks need to be migrated in some direction,
+ # but until we have auditing clarity, it might not be a big deal.
+
+ # Determine if we need to do the additional checks. Keep in mind
+ # that nova integrated with ironic is API read-heavy, so it is
+ # ideal to keep the policy checks for, say, system-member based
+ # roles to a minimum, as they are likely the regular API users as
+ # well. Also, the default for the filter_threshold is system-member.
+ evaluate_additional_policies = not policy.check_policy(
+ "baremetal:node:get:filter_threshold",
+ target_dict, cdict)
+ if evaluate_additional_policies:
+ # NOTE(TheJulia): The net effect of this is that, by default (at
+ # least matching the common/policy.py defaults), these fields
+ # should be stripped out.
+ if not policy.check("baremetal:node:get:last_error",
+ target_dict, cdict):
+ # Guard the last error from being visible as it can contain
+ # hostnames revealing infrastructure internal details.
+ node['last_error'] = ('** Value Redacted - Requires '
+ 'baremetal:node:get:last_error '
+ 'permission. **')
+ if not policy.check("baremetal:node:get:reservation",
+ target_dict, cdict):
+ # Guard conductor names from being visible.
+ node['reservation'] = ('** Redacted - requires baremetal:'
+ 'node:get:reservation permission. **')
+ if not policy.check("baremetal:node:get:driver_internal_info",
+ target_dict, cdict):
+ # Guard internal driver details from being visible.
+ node['driver_internal_info'] = {
+ 'content': '** Redacted - Requires baremetal:node:get:'
+ 'driver_internal_info permission. **'}
+ if not policy.check("baremetal:node:get:driver_info",
+ target_dict, cdict):
+ # Guard infrastructure internal details from being visible.
+ node['driver_info'] = {
+ 'content': '** Redacted - requires baremetal:node:get:'
+ 'driver_info permission. **'}
if not show_driver_secrets and node.get('driver_info'):
node['driver_info'] = strutils.mask_dict_password(
@@ -2148,9 +2200,38 @@ class NodesController(rest.RestController):
and strutils.bool_from_string(p['value'], default=None)
is False):
policy_checks.append('baremetal:node:disable_cleaning')
+ elif p['path'].startswith('/driver_info'):
+ policy_checks.append('baremetal:node:update:driver_info')
+ elif p['path'].startswith('/properties'):
+ policy_checks.append('baremetal:node:update:properties')
+ elif p['path'].startswith('/chassis_uuid'):
+ policy_checks.append('baremetal:node:update:chassis_uuid')
+ elif p['path'].startswith('/instance_uuid'):
+ policy_checks.append('baremetal:node:update:instance_uuid')
+ elif p['path'].startswith('/lessee'):
+ policy_checks.append('baremetal:node:update:lessee')
+ elif p['path'].startswith('/owner'):
+ policy_checks.append('baremetal:node:update:owner')
+ elif p['path'].startswith('/driver'):
+ policy_checks.append('baremetal:node:update:driver_interfaces')
+ elif ((p['path'].lstrip('/').rsplit(sep="_", maxsplit=1)[0]
+ in driver_base.ALL_INTERFACES)
+ and (p['path'].lstrip('/').rsplit(sep="_", maxsplit=1)[-1]
+ == "interface")):
+ # TODO(TheJulia): Replace the above check with something like
+ # elif (p['path'].lstrip('/').removesuffix('_interface')
+ # when the minimum supported version is Python 3.9.
+ policy_checks.append('baremetal:node:update:driver_interfaces')
+ elif p['path'].startswith('/network_data'):
+ policy_checks.append('baremetal:node:update:network_data')
+ elif p['path'].startswith('/conductor_group'):
+ policy_checks.append('baremetal:node:update:conductor_group')
+ elif p['path'].startswith('/name'):
+ policy_checks.append('baremetal:node:update:name')
+ elif p['path'].startswith('/retired'):
+ policy_checks.append('baremetal:node:update:retired')
else:
generic_update = True
-
# always do at least one check
if generic_update or not policy_checks:
policy_checks.append('baremetal:node:update')
@@ -2252,7 +2333,6 @@ class NodesController(rest.RestController):
node_dict, NODE_PATCH_SCHEMA, NODE_PATCH_VALIDATOR)
self._update_changed_fields(node_dict, rpc_node)
-
# NOTE(tenbrae): we calculate the rpc topic here in case node.driver
# has changed, so that update is sent to the
# new conductor, not the old one which may fail to
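
As an aside, a minimal standalone sketch of the path-to-policy dispatch the PATCH hunk above builds up. The helper and mapping below are illustrative, not Ironic's actual code (the real check also handles paths such as /driver and the disable_cleaning case):

    # Illustrative only -- names are hypothetical, not Ironic's actual code.
    PATH_POLICY_MAP = {
        '/driver_info': 'baremetal:node:update:driver_info',
        '/properties': 'baremetal:node:update:properties',
        '/chassis_uuid': 'baremetal:node:update:chassis_uuid',
        '/instance_uuid': 'baremetal:node:update:instance_uuid',
        '/lessee': 'baremetal:node:update:lessee',
        '/owner': 'baremetal:node:update:owner',
        '/network_data': 'baremetal:node:update:network_data',
        '/conductor_group': 'baremetal:node:update:conductor_group',
        '/name': 'baremetal:node:update:name',
        '/retired': 'baremetal:node:update:retired',
    }

    def policies_for_patch(patch):
        """Collect the policy names a JSON patch would trigger."""
        checks = set()
        for op in patch:
            path = op['path']
            for prefix, rule in PATH_POLICY_MAP.items():
                if path.startswith(prefix):
                    checks.add(rule)
                    break
            else:
                if path.lstrip('/').endswith('_interface'):
                    # e.g. /boot_interface, /deploy_interface, ...
                    checks.add('baremetal:node:update:driver_interfaces')
                else:
                    # Anything unrecognized falls back to the generic rule.
                    checks.add('baremetal:node:update')
        return checks

    patch = [{'op': 'replace', 'path': '/properties/cpus', 'value': 8},
             {'op': 'replace', 'path': '/deploy_interface', 'value': 'direct'}]
    assert policies_for_patch(patch) == {
        'baremetal:node:update:properties',
        'baremetal:node:update:driver_interfaces'}
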
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index f4480ef7b..4c58e0110 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -383,7 +383,15 @@ class PortsController(rest.RestController):
for that portgroup.
:raises: NotAcceptable, HTTPNotFound
"""
- project = api_utils.check_port_list_policy()
+ project = api_utils.check_port_list_policy(
+ parent_node=self.parent_node_ident,
+ parent_portgroup=self.parent_portgroup_ident)
+
+ if self.parent_node_ident:
+ node = self.parent_node_ident
+
+ if self.parent_portgroup_ident:
+ portgroup = self.parent_portgroup_ident
api_utils.check_allow_specify_fields(fields)
self._check_allowed_port_fields(fields)
@@ -439,7 +447,9 @@ class PortsController(rest.RestController):
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:raises: NotAcceptable, HTTPNotFound
"""
- project = api_utils.check_port_list_policy()
+ project = api_utils.check_port_list_policy(
+ parent_node=self.parent_node_ident,
+ parent_portgroup=self.parent_portgroup_ident)
self._check_allowed_port_fields([sort_key])
if portgroup and not api_utils.allow_portgroups_subcontrollers():
@@ -499,13 +509,36 @@ class PortsController(rest.RestController):
if self.parent_node_ident or self.parent_portgroup_ident:
raise exception.OperationNotPermitted()
- context = api.request.context
- api_utils.check_policy('baremetal:port:create')
-
# NOTE(lucasagomes): Create the node_id attribute on-the-fly
# to satisfy the api -> rpc object
# conversion.
- node = api_utils.replace_node_uuid_with_id(port)
+ # NOTE(TheJulia): The get of the node *does* check if the node
+ # can be accessed. We need to be able to get the node regardless
+ # in order to perform the actual policy check.
+ raise_node_not_found = False
+ node = None
+ owner = None
+ lessee = None
+ node_uuid = port.get('node_uuid')
+ try:
+ node = api_utils.replace_node_uuid_with_id(port)
+ owner = node.owner
+ lessee = node.lessee
+ except exception.NotFound:
+ raise_node_not_found = True
+
+ # While the rule is for the port, the base object that controls access
+ # is the node.
+ api_utils.check_owner_policy('node', 'baremetal:port:create',
+ owner, lessee=lessee,
+ conceal_node=False)
+ if raise_node_not_found:
+ # Delayed raise of NodeNotFound because we want to check
+ # the access policy first.
+ raise exception.NodeNotFound(node=node_uuid,
+ code=http_client.BAD_REQUEST)
+
+ context = api.request.context
self._check_allowed_port_fields(port)
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index 077e9ab71..6c9cc9303 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -170,7 +170,7 @@ class PortgroupsController(pecan.rest.RestController):
def _get_portgroups_collection(self, node_ident, address,
marker, limit, sort_key, sort_dir,
resource_url=None, fields=None,
- detail=None):
+ detail=None, project=None):
"""Return portgroups collection.
:param node_ident: UUID or name of a node.
@@ -182,6 +182,7 @@ class PortgroupsController(pecan.rest.RestController):
:param resource_url: Optional, URL to the portgroup resource.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
+ :param project: Optional, project ID to filter the request by.
"""
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -206,13 +207,16 @@ class PortgroupsController(pecan.rest.RestController):
node = api_utils.get_rpc_node(node_ident)
portgroups = objects.Portgroup.list_by_node_id(
api.request.context, node.id, limit,
- marker_obj, sort_key=sort_key, sort_dir=sort_dir)
+ marker_obj, sort_key=sort_key, sort_dir=sort_dir,
+ project=project)
elif address:
- portgroups = self._get_portgroups_by_address(address)
+ portgroups = self._get_portgroups_by_address(address,
+ project=project)
else:
portgroups = objects.Portgroup.list(api.request.context, limit,
marker_obj, sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
parameters = {}
if detail is not None:
parameters['detail'] = detail
@@ -224,7 +228,7 @@ class PortgroupsController(pecan.rest.RestController):
sort_dir=sort_dir,
**parameters)
- def _get_portgroups_by_address(self, address):
+ def _get_portgroups_by_address(self, address, project=None):
"""Retrieve a portgroup by its address.
:param address: MAC address of a portgroup, to get the portgroup
@@ -235,7 +239,8 @@ class PortgroupsController(pecan.rest.RestController):
"""
try:
portgroup = objects.Portgroup.get_by_address(api.request.context,
- address)
+ address,
+ project=project)
return [portgroup]
except exception.PortgroupNotFound:
return []
@@ -268,7 +273,14 @@ class PortgroupsController(pecan.rest.RestController):
if not api_utils.allow_portgroups():
raise exception.NotFound()
- api_utils.check_policy('baremetal:portgroup:get')
+ if self.parent_node_ident:
+ # Override the node, since this is being called by another
+ # controller with a linked view.
+ node = self.parent_node_ident
+
+ project = api_utils.check_port_list_policy(
+ portgroup=True,
+ parent_node=self.parent_node_ident)
api_utils.check_allowed_portgroup_fields(fields)
api_utils.check_allowed_portgroup_fields([sort_key])
@@ -280,7 +292,8 @@ class PortgroupsController(pecan.rest.RestController):
marker, limit,
sort_key, sort_dir,
fields=fields,
- detail=detail)
+ detail=detail,
+ project=project)
@METRICS.timer('PortgroupsController.detail')
@method.expose()
@@ -306,7 +319,15 @@ class PortgroupsController(pecan.rest.RestController):
if not api_utils.allow_portgroups():
raise exception.NotFound()
- api_utils.check_policy('baremetal:portgroup:get')
+ if self.parent_node_ident:
+ # If we have a parent node, then we need to override this method's
+ # node filter.
+ node = self.parent_node_ident
+
+ project = api_utils.check_port_list_policy(
+ portgroup=True,
+ parent_node=self.parent_node_ident)
+
api_utils.check_allowed_portgroup_fields([sort_key])
# NOTE: /detail should only work against collections
@@ -317,7 +338,7 @@ class PortgroupsController(pecan.rest.RestController):
resource_url = '/'.join(['portgroups', 'detail'])
return self._get_portgroups_collection(
node, address, marker, limit, sort_key, sort_dir,
- resource_url=resource_url)
+ resource_url=resource_url, project=project)
@METRICS.timer('PortgroupsController.get_one')
@method.expose()
@@ -332,7 +353,8 @@ class PortgroupsController(pecan.rest.RestController):
if not api_utils.allow_portgroups():
raise exception.NotFound()
- api_utils.check_policy('baremetal:portgroup:get')
+ rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:portgroup:get', portgroup_ident, portgroup=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -355,8 +377,31 @@ class PortgroupsController(pecan.rest.RestController):
if not api_utils.allow_portgroups():
raise exception.NotFound()
+ raise_node_not_found = False
+ node = None
+ owner = None
+ lessee = None
+ node_uuid = portgroup.get('node_uuid')
+ try:
+ # The replace_node_uuid_with_id also checks access to the node
+ # and will raise an exception if access is not permitted.
+ node = api_utils.replace_node_uuid_with_id(portgroup)
+ owner = node.owner
+ lessee = node.lessee
+ except exception.NotFound:
+ raise_node_not_found = True
+
+ # While the rule is for the port, the base object that controls access
+ # is the node.
+ api_utils.check_owner_policy('node', 'baremetal:portgroup:create',
+ owner, lessee=lessee,
+ conceal_node=False)
+ if raise_node_not_found:
+ # Delayed raise of NodeNotFound because we want to check
+ # the access policy first.
+ raise exception.NodeNotFound(node=node_uuid,
+ code=http_client.BAD_REQUEST)
context = api.request.context
- api_utils.check_policy('baremetal:portgroup:create')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -378,8 +423,6 @@ class PortgroupsController(pecan.rest.RestController):
if not portgroup.get('uuid'):
portgroup['uuid'] = uuidutils.generate_uuid()
- node = api_utils.replace_node_uuid_with_id(portgroup)
-
new_portgroup = objects.Portgroup(context, **portgroup)
notify.emit_start_notification(context, new_portgroup, 'create',
@@ -409,7 +452,9 @@ class PortgroupsController(pecan.rest.RestController):
raise exception.NotFound()
context = api.request.context
- api_utils.check_policy('baremetal:portgroup:update')
+
+ rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:portgroup:update', portgroup_ident, portgroup=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -421,9 +466,6 @@ class PortgroupsController(pecan.rest.RestController):
api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)
- rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix(
- portgroup_ident)
-
names = api_utils.get_patch_values(patch, '/name')
for name in names:
if (name and not api_utils.is_valid_logical_name(name)):
@@ -440,8 +482,8 @@ class PortgroupsController(pecan.rest.RestController):
# 1) Remove node_id because it's an internal value and
# not present in the API object
# 2) Add node_uuid
- rpc_node = api_utils.replace_node_id_with_uuid(portgroup_dict)
-
+ portgroup_dict.pop('node_id')
+ portgroup_dict['node_uuid'] = rpc_node.uuid
portgroup_dict = api_utils.apply_jsonpatch(portgroup_dict, patch)
if 'mode' not in portgroup_dict:
@@ -504,17 +546,14 @@ class PortgroupsController(pecan.rest.RestController):
if not api_utils.allow_portgroups():
raise exception.NotFound()
+ rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve(
+ 'baremetal:portgroup:delete', portgroup_ident, portgroup=True)
+
context = api.request.context
- api_utils.check_policy('baremetal:portgroup:delete')
if self.parent_node_ident:
raise exception.OperationNotPermitted()
- rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix(
- portgroup_ident)
- rpc_node = objects.Node.get_by_id(api.request.context,
- rpc_portgroup.node_id)
-
notify.emit_start_notification(context, rpc_portgroup, 'delete',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_portgroup, 'delete',
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 7fe557476..90a2a258c 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -25,6 +25,7 @@ import jsonschema
from jsonschema import exceptions as json_schema_exc
import os_traits
from oslo_config import cfg
+from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
from pecan import rest
@@ -274,6 +275,13 @@ def replace_node_uuid_with_id(to_dict):
node = objects.Node.get_by_uuid(api.request.context,
to_dict.pop('node_uuid'))
to_dict['node_id'] = node.id
+ # If the caller cannot get the node, this will error, guarding
+ # access for all users of this method: callers who may have other
+ # rights still need, at a minimum, to be able to see the node they
+ # are trying to do something with.
+ check_owner_policy('node', 'baremetal:node:get', node['owner'],
+ node['lessee'], conceal_node=node.uuid)
+
except exception.NodeNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for requests acting on non-nodes
@@ -1502,23 +1510,34 @@ def check_policy(policy_name):
policy.authorize(policy_name, cdict, api.request.context)
-def check_owner_policy(object_type, policy_name, owner, lessee=None):
+def check_owner_policy(object_type, policy_name, owner, lessee=None,
+ conceal_node=False):
"""Check if the policy authorizes this request on an object.
:param: object_type: type of object being checked
:param: policy_name: Name of the policy to check.
:param: owner: the owner
:param: lessee: the lessee
+ :param: conceal_node: the UUID of the node IF we should
+ conceal the existence of the node with a
+ 404 Error instead of a 403 Error.
:raises: HTTPForbidden if the policy forbids access.
"""
cdict = api.request.context.to_policy_values()
-
target_dict = dict(cdict)
target_dict[object_type + '.owner'] = owner
if lessee:
target_dict[object_type + '.lessee'] = lessee
- policy.authorize(policy_name, target_dict, api.request.context)
+ try:
+ policy.authorize(policy_name, target_dict, api.request.context)
+ except exception.HTTPForbidden:
+ if conceal_node:
+ # The caller does NOT have access to the node and we've been told
+ # we should return a 404 instead of HTTPForbidden.
+ raise exception.NodeNotFound(node=conceal_node)
+ else:
+ raise
def check_node_policy_and_retrieve(policy_name, node_ident,
@@ -1533,20 +1552,30 @@ def check_node_policy_and_retrieve(policy_name, node_ident,
:raises: NodeNotFound if the node is not found.
:return: RPC node identified by node_ident
"""
+ conceal_node = False
try:
if with_suffix:
rpc_node = get_rpc_node_with_suffix(node_ident)
else:
rpc_node = get_rpc_node(node_ident)
except exception.NodeNotFound:
- # don't expose non-existence of node unless requester
- # has generic access to policy
- cdict = api.request.context.to_policy_values()
- policy.authorize(policy_name, cdict, api.request.context)
raise
-
+ # Project scoped users will get a 404, whereas system
+ # scoped users should get a 403.
+ cdict = api.request.context.to_policy_values()
+ if cdict.get('project_id', False):
+ conceal_node = node_ident
+ try:
+ # Always check the ability to see the node BEFORE anything else.
+ check_owner_policy('node', 'baremetal:node:get', rpc_node['owner'],
+ rpc_node['lessee'], conceal_node=conceal_node)
+ except exception.NotAuthorized:
+ raise exception.NodeNotFound(node=node_ident)
+ # If we've reached here, we can see the node and we have
+ # access to view it.
check_owner_policy('node', policy_name,
- rpc_node['owner'], rpc_node['lessee'])
+ rpc_node['owner'], rpc_node['lessee'],
+ conceal_node=False)
return rpc_node
@@ -1612,7 +1641,9 @@ def check_list_policy(object_type, owner=None):
try:
policy.authorize('baremetal:%s:list_all' % object_type,
cdict, api.request.context)
- except exception.HTTPForbidden:
+ except (exception.HTTPForbidden, oslo_policy.InvalidScope):
+ # In the event the scoped policy fails, falling back to the
+ # policy governing a filtered view.
project_owner = cdict.get('project_id')
if (not project_owner or (owner and owner != project_owner)):
raise
@@ -1622,55 +1653,244 @@ def check_list_policy(object_type, owner=None):
return owner
-def check_port_policy_and_retrieve(policy_name, port_uuid):
+def check_port_policy_and_retrieve(policy_name, port_ident, portgroup=False):
"""Check if the specified policy authorizes this request on a port.
:param: policy_name: Name of the policy to check.
- :param: port_uuid: the UUID of a port.
+ :param: port_ident: The name, uuid, or other valid ID value to find
+ a port or portgroup by.
:raises: HTTPForbidden if the policy forbids access.
:raises: NodeNotFound if the node is not found.
- :return: RPC port identified by port_uuid and associated node
+ :return: RPC port identified by port_ident and its associated node
"""
context = api.request.context
cdict = context.to_policy_values()
-
+ owner = None
+ lessee = None
try:
- rpc_port = objects.Port.get_by_uuid(context, port_uuid)
- except exception.PortNotFound:
+ if not portgroup:
+ rpc_port = objects.Port.get(context, port_ident)
+ else:
+ rpc_port = objects.Portgroup.get(context, port_ident)
+ except (exception.PortNotFound, exception.PortgroupNotFound):
# don't expose non-existence of port unless requester
# has generic access to policy
- policy.authorize(policy_name, cdict, context)
raise
- rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
target_dict = dict(cdict)
- target_dict['node.owner'] = rpc_node['owner']
- target_dict['node.lessee'] = rpc_node['lessee']
+ try:
+ rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)
+ owner = rpc_node['owner']
+ lessee = rpc_node['lessee']
+ except exception.NodeNotFound:
+ # There is no spoon, err, node.
+ rpc_node = None
+ pass
+ target_dict = dict(cdict)
+ target_dict['node.owner'] = owner
+ target_dict['node.lessee'] = lessee
+ try:
+ policy.authorize('baremetal:node:get', target_dict, context)
+ except exception.NotAuthorized:
+ if not portgroup:
+ raise exception.PortNotFound(port=port_ident)
+ else:
+ raise exception.PortgroupNotFound(portgroup=port_ident)
+
policy.authorize(policy_name, target_dict, context)
return rpc_port, rpc_node
-def check_port_list_policy():
+def check_port_list_policy(portgroup=False, parent_node=None,
+ parent_portgroup=None):
"""Check if the specified policy authorizes this request on a port.
+ :param portgroup: Boolean value, default false, indicating if the list
+ policy check is for a portgroup as the policy names
+ are different between ports and portgroups.
+ :param parent_node: The UUID of a node, if any, to apply a policy
+ check to as well before applying other policy
+ check operations.
+ :param parent_portgroup: The UUID of the parent portgroup if the list
+ of ports was retrieved via the
+ /v1/portgroups/<uuid>/ports.
+
:raises: HTTPForbidden if the policy forbids access.
:return: owner that should be used for list query, if needed
"""
+
cdict = api.request.context.to_policy_values()
+
+ # No node is associated with this request, yet.
+ rpc_node = None
+ conceal_linked_node = None
+
+ if parent_portgroup:
+ # lookup the portgroup via the db, and then set parent_node
+ rpc_portgroup = objects.Portgroup.get_by_uuid(api.request.context,
+ parent_portgroup)
+ rpc_node = objects.Node.get_by_id(api.request.context,
+ rpc_portgroup.node_id)
+ parent_node = rpc_node.uuid
+
+ if parent_node and not rpc_node:
+ try:
+ rpc_node = objects.Node.get_by_uuid(api.request.context,
+ parent_node)
+ conceal_linked_node = rpc_node.uuid
+ except exception.NotFound:
+ # NOTE(TheJulia): This only covers portgroups since
+ # you can't go from ports to other items.
+ raise exception.PortgroupNotFound(portgroup=parent_portgroup)
+
+ if parent_node:
+ try:
+ check_owner_policy(
+ 'node', 'baremetal:node:get',
+ rpc_node.owner, rpc_node.lessee,
+ conceal_node=conceal_linked_node)
+ except exception.NotAuthorized:
+ if parent_portgroup:
+ # If this call was invoked with a parent portgroup
+ # then we need to signal the parent portgroup was not
+ # found.
+ raise exception.PortgroupNotFound(
+ portgroup=parent_portgroup)
+ if parent_node:
+ # This should likely never be hit, because
+ # the existence of a parent node should
+ # trigger the node not found exception to be
+ # explicitly raised.
+ raise exception.NodeNotFound(
+ node=parent_node)
+ raise
+
try:
- policy.authorize('baremetal:port:list_all',
- cdict, api.request.context)
+ if not portgroup:
+ policy.authorize('baremetal:port:list_all',
+ cdict, api.request.context)
+ else:
+ policy.authorize('baremetal:portgroup:list_all',
+ cdict, api.request.context)
except exception.HTTPForbidden:
owner = cdict.get('project_id')
if not owner:
raise
- policy.authorize('baremetal:port:list',
- cdict, api.request.context)
+ if not portgroup:
+ policy.authorize('baremetal:port:list',
+ cdict, api.request.context)
+ else:
+ policy.authorize('baremetal:portgroup:list',
+ cdict, api.request.context)
return owner
+def check_volume_list_policy(parent_node=None):
+ """Check if the specified policy authorizes this request on a volume.
+
+ :param parent_node: The UUID of a node, if any, to apply a policy
+ check to as well before applying other policy
+ check operations.
+
+ :raises: HTTPForbidden if the policy forbids access.
+ :return: project_id that should be used for the list query, if needed
+ """
+
+ cdict = api.request.context.to_policy_values()
+
+ # No node is associated with this request, yet.
+ rpc_node = None
+ conceal_linked_node = None
+
+ if parent_node:
+ try:
+ rpc_node = objects.Node.get_by_uuid(api.request.context,
+ parent_node)
+ conceal_linked_node = rpc_node.uuid
+ except exception.NotFound:
+ raise exception.NodeNotFound(node=parent_node)
+ if parent_node:
+ try:
+ check_owner_policy(
+ 'node', 'baremetal:node:get',
+ rpc_node.owner, rpc_node.lessee,
+ conceal_node=conceal_linked_node)
+ except exception.NotAuthorized:
+ if parent_node:
+ # This should likely never be hit, because
+ # the existence of a parent node should
+ # trigger the node not found exception to be
+ # explicitly raised.
+ raise exception.NodeNotFound(
+ node=parent_node)
+ raise
+
+ try:
+ policy.authorize('baremetal:volume:list_all',
+ cdict, api.request.context)
+ except exception.HTTPForbidden:
+ project_id = cdict.get('project_id')
+ if not project_id:
+ raise
+ policy.authorize('baremetal:volume:list',
+ cdict, api.request.context)
+ return project_id
+
+
+def check_volume_policy_and_retrieve(policy_name, vol_ident, target=False):
+ """Check if the specified policy authorizes this request on a volume.
+
+ :param: policy_name: Name of the policy to check.
+ :param: vol_ident: The name, uuid, or other valid ID value to find
+ a volume target or connector by.
+ :param: target: Boolean value to indicate if the check is for a volume
+ target or connector. Default value is False, implying
+ connector.
+
+ :raises: HTTPForbidden if the policy forbids access.
+ :raises: VolumeConnectorNotFound if the volume connector is not found.
+ :raises: VolumeTargetNotFound if the volume target is not found.
+ :return: RPC volume connector or target identified by vol_ident and
+ the associated node
+ """
+ context = api.request.context
+ cdict = context.to_policy_values()
+ owner = None
+ lessee = None
+ try:
+ if not target:
+ rpc_vol = objects.VolumeConnector.get(context, vol_ident)
+ else:
+ rpc_vol = objects.VolumeTarget.get(context, vol_ident)
+ except (exception.VolumeConnectorNotFound, exception.VolumeTargetNotFound):
+ # don't expose non-existence of volume unless requester
+ # has generic access to policy
+ raise
+
+ target_dict = dict(cdict)
+ try:
+ rpc_node = objects.Node.get_by_id(context, rpc_vol.node_id)
+ owner = rpc_node['owner']
+ lessee = rpc_node['lessee']
+ except exception.NodeNotFound:
+ pass
+ target_dict = dict(cdict)
+ target_dict['node.owner'] = owner
+ target_dict['node.lessee'] = lessee
+ try:
+ policy.authorize('baremetal:node:get', target_dict, context)
+ except exception.NotAuthorized:
+ if not target:
+ raise exception.VolumeConnectorNotFound(connector=vol_ident)
+ else:
+ raise exception.VolumeTargetNotFound(target=vol_ident)
+
+ policy.authorize(policy_name, target_dict, context)
+
+ return rpc_vol, rpc_node
+
+
def allow_build_configdrive():
"""Check if building configdrive is allowed.
diff --git a/ironic/api/controllers/v1/volume_connector.py b/ironic/api/controllers/v1/volume_connector.py
index 0a6ffa4d5..100822029 100644
--- a/ironic/api/controllers/v1/volume_connector.py
+++ b/ironic/api/controllers/v1/volume_connector.py
@@ -111,7 +111,8 @@ class VolumeConnectorsController(rest.RestController):
def _get_volume_connectors_collection(self, node_ident, marker, limit,
sort_key, sort_dir,
resource_url=None,
- fields=None, detail=None):
+ fields=None, detail=None,
+ project=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -135,13 +136,15 @@ class VolumeConnectorsController(rest.RestController):
node = api_utils.get_rpc_node(node_ident)
connectors = objects.VolumeConnector.list_by_node_id(
api.request.context, node.id, limit, marker_obj,
- sort_key=sort_key, sort_dir=sort_dir)
+ sort_key=sort_key, sort_dir=sort_dir,
+ project=project)
else:
connectors = objects.VolumeConnector.list(api.request.context,
limit,
marker_obj,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return list_convert_with_links(connectors, limit,
url=resource_url,
fields=fields,
@@ -156,7 +159,7 @@ class VolumeConnectorsController(rest.RestController):
sort_dir=args.string, fields=args.string_list,
detail=args.boolean)
def get_all(self, node=None, marker=None, limit=None, sort_key='id',
- sort_dir='asc', fields=None, detail=None):
+ sort_dir='asc', fields=None, detail=None, project=None):
"""Retrieve a list of volume connectors.
:param node: UUID or name of a node, to get only volume connectors
@@ -179,7 +182,8 @@ class VolumeConnectorsController(rest.RestController):
:raises: InvalidParameterValue if sort key is invalid for sorting.
:raises: InvalidParameterValue if both fields and detail are specified.
"""
- api_utils.check_policy('baremetal:volume:get')
+ project = api_utils.check_volume_list_policy(
+ parent_node=self.parent_node_ident)
if fields is None and not detail:
fields = _DEFAULT_RETURN_FIELDS
@@ -191,7 +195,7 @@ class VolumeConnectorsController(rest.RestController):
resource_url = 'volume/connectors'
return self._get_volume_connectors_collection(
node, marker, limit, sort_key, sort_dir, resource_url=resource_url,
- fields=fields, detail=detail)
+ fields=fields, detail=detail, project=project)
@METRICS.timer('VolumeConnectorsController.get_one')
@method.expose()
@@ -210,13 +214,15 @@ class VolumeConnectorsController(rest.RestController):
:raises: VolumeConnectorNotFound if no volume connector exists with
the specified UUID.
"""
- api_utils.check_policy('baremetal:volume:get')
+
+ rpc_connector, _ = api_utils.check_volume_policy_and_retrieve(
+ 'baremetal:volume:get',
+ connector_uuid,
+ target=False)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
- rpc_connector = objects.VolumeConnector.get_by_uuid(
- api.request.context, connector_uuid)
return convert_with_links(rpc_connector, fields=fields)
@METRICS.timer('VolumeConnectorsController.post')
@@ -238,7 +244,23 @@ class VolumeConnectorsController(rest.RestController):
same UUID already exists
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:create')
+ owner = None
+ lessee = None
+ raise_node_not_found = False
+ node_uuid = connector.get('node_uuid')
+
+ try:
+ node = api_utils.replace_node_uuid_with_id(connector)
+ owner = node.owner
+ lessee = node.lessee
+ except exception.NotFound:
+ raise_node_not_found = True
+ api_utils.check_owner_policy('node', 'baremetal:volume:create',
+ owner, lessee=lessee, conceal_node=False)
+
+ if raise_node_not_found:
+ raise exception.InvalidInput(fieldname='node_uuid',
+ value=node_uuid)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -247,8 +269,6 @@ class VolumeConnectorsController(rest.RestController):
if not connector.get('uuid'):
connector['uuid'] = uuidutils.generate_uuid()
- node = api_utils.replace_node_uuid_with_id(connector)
-
new_connector = objects.VolumeConnector(context, **connector)
notify.emit_start_notification(context, new_connector, 'create',
@@ -294,7 +314,11 @@ class VolumeConnectorsController(rest.RestController):
volume connector is not powered off.
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:update')
+
+ rpc_connector, rpc_node = api_utils.check_volume_policy_and_retrieve(
+ 'baremetal:volume:update',
+ connector_uuid,
+ target=False)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -307,9 +331,6 @@ class VolumeConnectorsController(rest.RestController):
"%(uuid)s.") % {'uuid': str(value)}
raise exception.InvalidUUID(message=message)
- rpc_connector = objects.VolumeConnector.get_by_uuid(context,
- connector_uuid)
-
connector_dict = rpc_connector.as_dict()
# NOTE(smoriya):
# 1) Remove node_id because it's an internal value and
@@ -370,14 +391,14 @@ class VolumeConnectorsController(rest.RestController):
volume connector is not powered off.
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:delete')
+ rpc_connector, rpc_node = api_utils.check_volume_policy_and_retrieve(
+ 'baremetal:volume:delete',
+ connector_uuid,
+ target=False)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
- rpc_connector = objects.VolumeConnector.get_by_uuid(context,
- connector_uuid)
- rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
notify.emit_start_notification(context, rpc_connector, 'delete',
node_uuid=rpc_node.uuid)
with notify.handle_error_notification(context, rpc_connector,
diff --git a/ironic/api/controllers/v1/volume_target.py b/ironic/api/controllers/v1/volume_target.py
index 9fa5f8909..d98f461ed 100644
--- a/ironic/api/controllers/v1/volume_target.py
+++ b/ironic/api/controllers/v1/volume_target.py
@@ -27,6 +27,7 @@ from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
+from ironic.common import policy
from ironic import objects
METRICS = metrics_utils.get_metrics_logger(__name__)
@@ -119,9 +120,22 @@ class VolumeTargetsController(rest.RestController):
super(VolumeTargetsController, self).__init__()
self.parent_node_ident = node_ident
+ def _redact_target_properties(self, target):
+ # Redacts fields that could contain sensitive information, such as
+ # the iSCSI connection details carried in the target properties.
+ redacted = ('** Value redacted: Requires permission '
+ 'baremetal:volume:view_target_properties '
+ 'access. Permission denied. **')
+ redacted_message = {
+ 'redacted_contents': redacted
+ }
+ target.properties = redacted_message
+
def _get_volume_targets_collection(self, node_ident, marker, limit,
sort_key, sort_dir, resource_url=None,
- fields=None, detail=None):
+ fields=None, detail=None,
+ project=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -134,7 +148,6 @@ class VolumeTargetsController(rest.RestController):
raise exception.InvalidParameterValue(
_("The sort_key value %(key)s is an invalid field for "
"sorting") % {'key': sort_key})
-
node_ident = self.parent_node_ident or node_ident
if node_ident:
@@ -145,12 +158,19 @@ class VolumeTargetsController(rest.RestController):
node = api_utils.get_rpc_node(node_ident)
targets = objects.VolumeTarget.list_by_node_id(
api.request.context, node.id, limit, marker_obj,
- sort_key=sort_key, sort_dir=sort_dir)
+ sort_key=sort_key, sort_dir=sort_dir, project=project)
else:
targets = objects.VolumeTarget.list(api.request.context,
limit, marker_obj,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
+ cdict = api.request.context.to_policy_values()
+ if not policy.check_policy('baremetal:volume:view_target_properties',
+ cdict, cdict):
+ for target in targets:
+ self._redact_target_properties(target)
+
return list_convert_with_links(targets, limit,
url=resource_url,
fields=fields,
@@ -165,7 +185,7 @@ class VolumeTargetsController(rest.RestController):
sort_dir=args.string, fields=args.string_list,
detail=args.boolean)
def get_all(self, node=None, marker=None, limit=None, sort_key='id',
- sort_dir='asc', fields=None, detail=None):
+ sort_dir='asc', fields=None, detail=None, project=None):
"""Retrieve a list of volume targets.
:param node: UUID or name of a node, to get only volume targets
@@ -180,6 +200,8 @@ class VolumeTargetsController(rest.RestController):
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param detail: Optional, whether to retrieve with detail.
+ :param project: Optional, an associated node project (owner,
+ or lessee) to filter the query upon.
:returns: a list of volume targets, or an empty list if no volume
target is found.
@@ -188,8 +210,8 @@ class VolumeTargetsController(rest.RestController):
:raises: InvalidParameterValue if sort key is invalid for sorting.
:raises: InvalidParameterValue if both fields and detail are specified.
"""
- api_utils.check_policy('baremetal:volume:get')
-
+ project = api_utils.check_volume_list_policy(
+ parent_node=self.parent_node_ident)
if fields is None and not detail:
fields = _DEFAULT_RETURN_FIELDS
@@ -202,7 +224,8 @@ class VolumeTargetsController(rest.RestController):
sort_key, sort_dir,
resource_url=resource_url,
fields=fields,
- detail=detail)
+ detail=detail,
+ project=project)
@METRICS.timer('VolumeTargetsController.get_one')
@method.expose()
@@ -220,13 +243,20 @@ class VolumeTargetsController(rest.RestController):
node.
:raises: VolumeTargetNotFound if no volume target with this UUID exists
"""
- api_utils.check_policy('baremetal:volume:get')
+
+ rpc_target, _ = api_utils.check_volume_policy_and_retrieve(
+ 'baremetal:volume:get',
+ target_uuid,
+ target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
- rpc_target = objects.VolumeTarget.get_by_uuid(
- api.request.context, target_uuid)
+ cdict = api.request.context.to_policy_values()
+ if not policy.check_policy('baremetal:volume:view_target_properties',
+ cdict, cdict):
+ self._redact_target_properties(rpc_target)
+
return convert_with_links(rpc_target, fields=fields)
@METRICS.timer('VolumeTargetsController.post')
@@ -248,7 +278,23 @@ class VolumeTargetsController(rest.RestController):
UUID exists
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:create')
+ raise_node_not_found = False
+ node = None
+ owner = None
+ lessee = None
+ node_uuid = target.get('node_uuid')
+ try:
+ node = api_utils.replace_node_uuid_with_id(target)
+ owner = node.owner
+ lessee = node.lessee
+ except exception.NotFound:
+ raise_node_not_found = True
+ api_utils.check_owner_policy('node', 'baremetal:volume:create',
+ owner, lessee=lessee,
+ conceal_node=False)
+ if raise_node_not_found:
+ raise exception.InvalidInput(fieldname='node_uuid',
+ value=node_uuid)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -256,9 +302,6 @@ class VolumeTargetsController(rest.RestController):
# NOTE(hshiina): UUID is mandatory for notification payload
if not target.get('uuid'):
target['uuid'] = uuidutils.generate_uuid()
-
- node = api_utils.replace_node_uuid_with_id(target)
-
new_target = objects.VolumeTarget(context, **target)
notify.emit_start_notification(context, new_target, 'create',
@@ -301,7 +344,10 @@ class VolumeTargetsController(rest.RestController):
volume target is not powered off.
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:update')
+
+ api_utils.check_volume_policy_and_retrieve('baremetal:volume:update',
+ target_uuid,
+ target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
@@ -327,6 +373,10 @@ class VolumeTargetsController(rest.RestController):
try:
if target_dict['node_uuid'] != rpc_node.uuid:
+
+ # TODO(TheJulia): I guess the intention is to permit the
+ # mapping to be changed; should we even allow this at all?
rpc_node = objects.Node.get(
api.request.context, target_dict['node_uuid'])
except exception.NodeNotFound as e:
@@ -374,7 +424,10 @@ class VolumeTargetsController(rest.RestController):
volume target is not powered off.
"""
context = api.request.context
- api_utils.check_policy('baremetal:volume:delete')
+
+ api_utils.check_volume_policy_and_retrieve('baremetal:volume:delete',
+ target_uuid,
+ target=True)
if self.parent_node_ident:
raise exception.OperationNotPermitted()
diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py
index 077598019..37ce123fe 100644
--- a/ironic/common/driver_factory.py
+++ b/ironic/common/driver_factory.py
@@ -69,7 +69,9 @@ def _attach_interfaces_to_driver(bare_driver, node, hw_type):
the requested implementation is not compatible with it.
"""
for iface in _INTERFACE_LOADERS:
- impl_name = getattr(node, '%s_interface' % iface)
+ iface_name = '%s_interface' % iface
+ impl_name = node.instance_info.get(iface_name,
+ getattr(node, iface_name))
impl = get_interface(hw_type, iface, impl_name)
setattr(bare_driver, iface, impl)
@@ -204,20 +206,29 @@ def check_and_update_node_interfaces(node, hw_type=None):
# NOTE(dtantsur): objects raise NotImplementedError on accessing fields
# that are known, but missing from an object. Thus, we cannot just use
# getattr(node, field_name, None) here.
+ set_default = True
+ if 'instance_info' in node and field_name in node.instance_info:
+ impl_name = node.instance_info.get(field_name)
+ if impl_name is not None:
+ # Check that the provided value is correct for this type
+ get_interface(hw_type, iface, impl_name)
+ set_default = False
+
if field_name in node:
impl_name = getattr(node, field_name)
if impl_name is not None:
# Check that the provided value is correct for this type
get_interface(hw_type, iface, impl_name)
- # Not changing the result, proceeding with the next interface
- continue
+ set_default = False
- impl_name = default_interface(hw_type, iface,
- driver_name=node.driver, node=node.uuid)
+ if set_default:
+ impl_name = default_interface(hw_type, iface,
+ driver_name=node.driver,
+ node=node.uuid)
- # Set the calculated default and set result to True
- setattr(node, field_name, impl_name)
- result = True
+ # Set the calculated default and set result to True
+ setattr(node, field_name, impl_name)
+ result = True
return result
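
A hedged sketch of the interface lookup order the driver_factory hunks above establish, using plain dicts instead of Ironic's node object and default calculation: an instance_info override wins over the node field, which wins over the calculated default:

    def resolve_interface(node, iface, calculated_default):
        """Resolve '<iface>_interface' with instance_info taking precedence."""
        field = '%s_interface' % iface
        # 1. Per-deployment override supplied via instance_info.
        impl = node.get('instance_info', {}).get(field)
        if impl is not None:
            return impl
        # 2. Value stored on the node itself.
        impl = node.get(field)
        if impl is not None:
            return impl
        # 3. Fall back to the default calculated for the hardware type.
        return calculated_default

    node = {'deploy_interface': 'direct',
            'instance_info': {'deploy_interface': 'ramdisk'}}
    assert resolve_interface(node, 'deploy', 'ansible') == 'ramdisk'
    assert resolve_interface({}, 'deploy', 'ansible') == 'ansible'
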
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index 57722f6fb..5ffb373ab 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -80,13 +80,51 @@ PROJECT_READER = ('role:reader and '
# protecting APIs designed to operate with multiple scopes (e.g., a system
# administrator should be able to delete any baremetal host in the deployment,
# a project member should only be able to delete hosts in their project).
-SYSTEM_ADMIN_OR_PROJECT_MEMBER = (
- '(' + SYSTEM_ADMIN + ') or (' + PROJECT_MEMBER + ')'
+SYSTEM_OR_PROJECT_MEMBER = (
+ '(' + SYSTEM_MEMBER + ') or (' + PROJECT_MEMBER + ')'
)
SYSTEM_OR_PROJECT_READER = (
'(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'
)
+PROJECT_OWNER_ADMIN = ('role:admin and project_id:%(node.owner)s')
+PROJECT_OWNER_MEMBER = ('role:member and project_id:%(node.owner)s')
+PROJECT_OWNER_READER = ('role:reader and project_id:%(node.owner)s')
+PROJECT_LESSEE_ADMIN = ('role:admin and project_id:%(node.lessee)s')
+
+SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN = (
+ '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ')' # noqa
+)
+
+SYSTEM_ADMIN_OR_OWNER_ADMIN = (
+ '(' + SYSTEM_ADMIN + ') or (' + PROJECT_OWNER_ADMIN + ')'
+)
+
+SYSTEM_MEMBER_OR_OWNER_ADMIN = (
+ '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ')'
+)
+
+SYSTEM_MEMBER_OR_OWNER_MEMBER = (
+ '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ')'
+)
+
+SYSTEM_OR_OWNER_READER = (
+ '(' + SYSTEM_READER + ') or (' + PROJECT_OWNER_READER + ')'
+)
+
+SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN = (
+ '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_LESSEE_ADMIN + ')' # noqa
+)
+
+
+# Special purpose aliases for things like "ability to access the API
+# as a reader", or permission checking that does not require node
+# owner relationship checking.
+API_READER = ('role:reader')
+TARGET_PROPERTIES_READER = (
+ '(' + SYSTEM_READER + ') or (role:admin)'
+)
+
default_policies = [
# Legacy setting, don't remove. Likely to be overridden by operators who
# forget to update their policy.json configuration file.
@@ -283,20 +321,11 @@ node_policies = [
deprecated_since=versionutils.deprecated.WALLABY
),
policy.DocumentedRuleDefault(
- name='baremetal:node:get',
- check_str=SYSTEM_READER,
- scope_types=['system'],
- description='Retrieve a single Node record',
- operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
- deprecated_rule=deprecated_node_get,
- deprecated_reason=deprecated_node_reason,
- deprecated_since=versionutils.deprecated.WALLABY
- ),
- policy.DocumentedRuleDefault(
name='baremetal:node:list',
- check_str=SYSTEM_READER,
- scope_types=['system'],
- description='Retrieve multiple Node records, filtered by owner',
+ check_str=API_READER,
+ scope_types=['system', 'project'],
+ description='Retrieve multiple Node records, filtered by '
+ 'an explicit owner or the client project_id',
operations=[{'path': '/nodes', 'method': 'GET'},
{'path': '/nodes/detail', 'method': 'GET'}],
deprecated_rule=deprecated_node_list,
@@ -315,38 +344,246 @@ node_policies = [
deprecated_since=versionutils.deprecated.WALLABY
),
policy.DocumentedRuleDefault(
- name='baremetal:node:update',
+ name='baremetal:node:get',
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ description='Retrieve a single Node record',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:get:filter_threshold',
+ check_str=SYSTEM_READER,
+ scope_types=['system', 'project'],
+ description='Filter to allow operators to govern the threshold '
+ 'where information should be filtered. Non-authorized '
+ 'users will be subjected to additional API policy '
+ 'checks for API content response bodies.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ # This rule falls back to deprecated_node_get in order to provide a
+ # mechanism so the additional policies only engage in an updated
+ # operating context.
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY,
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:get:last_error',
+ check_str=SYSTEM_OR_OWNER_READER,
+ scope_types=['system', 'project'],
+ description='Governs if the node last_error field is masked from API '
+ 'clients with insufficient privileges.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:get:reservation',
+ check_str=SYSTEM_OR_OWNER_READER,
+ scope_types=['system', 'project'],
+ description='Governs if the node reservation field is masked from API '
+ 'clients with insufficient privileges.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:get:driver_internal_info',
+ check_str=SYSTEM_OR_OWNER_READER,
+ scope_types=['system', 'project'],
+ description='Governs if the node driver_internal_info field is '
+ 'masked from API clients with insufficient privileges.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:get:driver_info',
+ check_str=SYSTEM_OR_OWNER_READER,
+ scope_types=['system', 'project'],
+ description='Governs if the driver_info field is masked from API '
+ 'clients with insufficient privileges.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'GET'}],
+ deprecated_rule=deprecated_node_get,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:driver_info',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+ description='Governs if node driver_info field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:properties',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+ description='Governs if node properties field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:chassis_uuid',
+ check_str=SYSTEM_ADMIN,
+ scope_types=['system', 'project'],
+ description='Governs if node chassis_uuid field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:instance_uuid',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+ description='Governs if node instance_uuid field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:lessee',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+ description='Governs if node lessee field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:owner',
check_str=SYSTEM_MEMBER,
- scope_types=['system'],
- description='Update Node records',
+ scope_types=['system', 'project'],
+ description='Governs if node owner field can be updated via '
+ 'the API clients.',
operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
deprecated_rule=deprecated_node_update,
deprecated_reason=deprecated_node_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
- # TODO(TheJulia): Explicit RBAC testing needed for this.
policy.DocumentedRuleDefault(
- name='baremetal:node:update_extra',
+ name='baremetal:node:update:driver_interfaces',
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
+ description='Governs if the node driver and driver interface fields '
+ 'can be updated via the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:network_data',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+        description='Governs if node network_data field can be updated via '
+                    'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:conductor_group',
check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ scope_types=['system', 'project'],
+ description='Governs if node conductor_group field can be updated '
+ 'via the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:name',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+ description='Governs if node name field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:retired',
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
+        description='Governs if the node retired and retired_reason fields '
+                    'can be updated by API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+
+    # If this rule is denied we should likely roll into the other rules.
+    # For example, this rule could match "SYSTEM_MEMBER" by default and then
+    # drill further into each field, which would maintain what we do today
+    # and enable further testing.
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update',
+ check_str=SYSTEM_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description='Generalized update of node records',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ deprecated_rule=deprecated_node_update,
+ deprecated_reason=deprecated_node_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update_extra',
+ check_str=SYSTEM_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Update Node extra field',
operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
deprecated_rule=deprecated_node_update_extra,
deprecated_reason=deprecated_node_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
- # TODO(TheJulia): Explicit RBAC testing needed for this.
+    # TODO(TheJulia): Multiple additional fields need policies. This needs
+    # to be reviewed/audited/addressed.
+    # * Get ability on last_error - policy added
+    # * Get ability on reservation (conductor names) - policy added
+    # * Get ability on driver_internal_info (internal addressing) - added
+    # * Ability to get driver_info - policy added
+    # * Ability to set driver_info - policy added
+    # * Ability to set properties - added
+    # * Ability to set chassis_uuid - added
+    # * Ability to set instance_uuid - added
+    # * Ability to set a lessee - defaults to admin or owner only - added
+    # * Ability to set driver/*_interface - added
+    # * Ability to set network_data - added
+    # * Ability to set conductor_group - added
+    # * Ability to set name - added
policy.DocumentedRuleDefault(
name='baremetal:node:update_instance_info',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Update Node instance_info field',
operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
deprecated_rule=deprecated_node_update_instance_info,
deprecated_reason=deprecated_node_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
- # TODO(TheJulia): Explicit RBAC testing needed for this.
policy.DocumentedRuleDefault(
name='baremetal:node:update_owner_provisioned',
check_str=SYSTEM_MEMBER,
@@ -357,11 +594,10 @@ node_policies = [
deprecated_reason=deprecated_node_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
- # TODO(TheJulia): Explicit RBAC testing needed for this... Maybe?
policy.DocumentedRuleDefault(
name='baremetal:node:delete',
check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ scope_types=['system', 'project'],
description='Delete Node records',
operations=[{'path': '/nodes/{node_ident}', 'method': 'DELETE'}],
deprecated_rule=deprecated_node_delete,
@@ -371,8 +607,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:validate',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Request active validation of Nodes',
operations=[
{'path': '/nodes/{node_ident}/validate', 'method': 'GET'}
@@ -384,8 +620,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:set_maintenance',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Set maintenance flag, taking a Node out of service',
operations=[
{'path': '/nodes/{node_ident}/maintenance', 'method': 'PUT'}
@@ -396,8 +632,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:clear_maintenance',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description=(
'Clear maintenance flag, placing the Node into service again'
),
@@ -413,8 +649,8 @@ node_policies = [
# a cached object.
policy.DocumentedRuleDefault(
name='baremetal:node:get_boot_device',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Retrieve Node boot device metadata',
operations=[
{'path': '/nodes/{node_ident}/management/boot_device',
@@ -428,8 +664,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_boot_device',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Change Node boot device',
operations=[
{'path': '/nodes/{node_ident}/management/boot_device',
@@ -442,8 +678,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:get_indicator_state',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Retrieve Node indicators and their states',
operations=[
{'path': '/nodes/{node_ident}/management/indicators/'
@@ -458,8 +694,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_indicator_state',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
description='Change Node indicator state',
operations=[
{'path': '/nodes/{node_ident}/management/indicators/'
@@ -473,8 +709,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:inject_nmi',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Inject NMI for a node',
operations=[
{'path': '/nodes/{node_ident}/management/inject_nmi',
@@ -487,8 +723,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:get_states',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='View Node power and provision state',
operations=[{'path': '/nodes/{node_ident}/states', 'method': 'GET'}],
deprecated_rule=deprecated_node_get_states,
@@ -497,8 +733,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_power_state',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Change Node power status',
operations=[
{'path': '/nodes/{node_ident}/states/power', 'method': 'PUT'}
@@ -509,8 +745,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_provision_state',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Change Node provision status',
operations=[
{'path': '/nodes/{node_ident}/states/provision', 'method': 'PUT'}
@@ -521,8 +757,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_raid_state',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
description='Change Node RAID status',
operations=[
{'path': '/nodes/{node_ident}/states/raid', 'method': 'PUT'}
@@ -533,8 +769,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:get_console',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
description='Get Node console connection information',
operations=[
{'path': '/nodes/{node_ident}/states/console', 'method': 'GET'}
@@ -545,8 +781,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:set_console_state',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_MEMBER,
+ scope_types=['system', 'project'],
description='Change Node console status',
operations=[
{'path': '/nodes/{node_ident}/states/console', 'method': 'PUT'}
@@ -558,8 +794,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:vif:list',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List VIFs attached to node',
operations=[{'path': '/nodes/{node_ident}/vifs', 'method': 'GET'}],
deprecated_rule=deprecated_node_vif_list,
@@ -568,8 +804,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:vif:attach',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Attach a VIF to a node',
operations=[{'path': '/nodes/{node_ident}/vifs', 'method': 'POST'}],
deprecated_rule=deprecated_node_vif_attach,
@@ -578,8 +814,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:vif:detach',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Detach a VIF from a node',
operations=[
{'path': '/nodes/{node_ident}/vifs/{node_vif_ident}',
@@ -589,11 +825,10 @@ node_policies = [
deprecated_reason=deprecated_node_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
-
policy.DocumentedRuleDefault(
name='baremetal:node:traits:list',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List node traits',
operations=[{'path': '/nodes/{node_ident}/traits', 'method': 'GET'}],
deprecated_rule=deprecated_node_traits_list,
@@ -602,8 +837,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:traits:set',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Add a trait to, or replace all traits of, a node',
operations=[
{'path': '/nodes/{node_ident}/traits', 'method': 'PUT'},
@@ -615,8 +850,8 @@ node_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:node:traits:delete',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Remove one or all traits from a node',
operations=[
{'path': '/nodes/{node_ident}/traits', 'method': 'DELETE'},
@@ -630,8 +865,8 @@ node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:bios:get',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Retrieve Node BIOS information',
operations=[
{'path': '/nodes/{node_ident}/bios', 'method': 'GET'},
@@ -686,8 +921,8 @@ The baremetal port API is now aware of system scope and default roles.
port_policies = [
policy.DocumentedRuleDefault(
name='baremetal:port:get',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Retrieve Port records',
operations=[
{'path': '/ports/{port_id}', 'method': 'GET'},
@@ -703,8 +938,8 @@ port_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:port:list',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=API_READER,
+ scope_types=['system', 'project'],
description='Retrieve multiple Port records, filtered by owner',
operations=[
{'path': '/ports', 'method': 'GET'},
@@ -717,7 +952,7 @@ port_policies = [
policy.DocumentedRuleDefault(
name='baremetal:port:list_all',
check_str=SYSTEM_READER,
- scope_types=['system'],
+ scope_types=['system', 'project'],
description='Retrieve multiple Port records',
operations=[
{'path': '/ports', 'method': 'GET'},
@@ -729,8 +964,8 @@ port_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:port:create',
- check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ check_str=SYSTEM_ADMIN_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Create Port records',
operations=[{'path': '/ports', 'method': 'POST'}],
deprecated_rule=deprecated_port_create,
@@ -739,8 +974,8 @@ port_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:port:delete',
- check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ check_str=SYSTEM_ADMIN_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Delete Port records',
operations=[{'path': '/ports/{port_id}', 'method': 'DELETE'}],
deprecated_rule=deprecated_port_delete,
@@ -749,8 +984,8 @@ port_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:port:update',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Update Port records',
operations=[{'path': '/ports/{port_id}', 'method': 'PATCH'}],
deprecated_rule=deprecated_port_update,
@@ -782,8 +1017,8 @@ The baremetal port groups API is now aware of system scope and default roles.
portgroup_policies = [
policy.DocumentedRuleDefault(
name='baremetal:portgroup:get',
- check_str=SYSTEM_READER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Retrieve Portgroup records',
operations=[
{'path': '/portgroups', 'method': 'GET'},
@@ -798,8 +1033,8 @@ portgroup_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:portgroup:create',
- check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ check_str=SYSTEM_ADMIN_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Create Portgroup records',
operations=[{'path': '/portgroups', 'method': 'POST'}],
deprecated_rule=deprecated_portgroup_create,
@@ -808,8 +1043,8 @@ portgroup_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:portgroup:delete',
- check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ check_str=SYSTEM_ADMIN_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Delete Portgroup records',
operations=[
{'path': '/portgroups/{portgroup_ident}', 'method': 'DELETE'}
@@ -820,8 +1055,8 @@ portgroup_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:portgroup:update',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_ADMIN,
+ scope_types=['system', 'project'],
description='Update Portgroup records',
operations=[
{'path': '/portgroups/{portgroup_ident}', 'method': 'PATCH'}
@@ -830,6 +1065,32 @@ portgroup_policies = [
deprecated_reason=deprecated_portgroup_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
+ policy.DocumentedRuleDefault(
+ name='baremetal:portgroup:list',
+ check_str=API_READER,
+ scope_types=['system', 'project'],
+        description='Retrieve multiple Portgroup records, filtered by owner',
+ operations=[
+ {'path': '/portgroups', 'method': 'GET'},
+ {'path': '/portgroups/detail', 'method': 'GET'}
+ ],
+ deprecated_rule=deprecated_portgroup_get,
+ deprecated_reason=deprecated_portgroup_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:portgroup:list_all',
+ check_str=SYSTEM_READER,
+ scope_types=['system', 'project'],
+        description='Retrieve multiple Portgroup records',
+ operations=[
+ {'path': '/portgroups', 'method': 'GET'},
+ {'path': '/portgroups/detail', 'method': 'GET'}
+ ],
+ deprecated_rule=deprecated_portgroup_get,
+ deprecated_reason=deprecated_portgroup_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
]
@@ -975,7 +1236,10 @@ vendor_passthru_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:vendor_passthru',
check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+    # NOTE(TheJulia): Project scope is listed, but not a project-scoped role,
+    # as some operators may find it useful to provide access to, say, owner
+    # admins.
+ scope_types=['system', 'project'],
description='Access vendor-specific Node functions',
operations=[
{'path': 'nodes/{node_ident}/vendor_passthru/methods',
@@ -1086,9 +1350,40 @@ roles.
volume_policies = [
policy.DocumentedRuleDefault(
- name='baremetal:volume:get',
+ name='baremetal:volume:list_all',
check_str=SYSTEM_READER,
- scope_types=['system'],
+ scope_types=['system', 'project'],
+ description=('Retrieve a list of all Volume connector and target '
+ 'records'),
+ operations=[
+ {'path': '/volume/connectors', 'method': 'GET'},
+ {'path': '/volume/targets', 'method': 'GET'},
+ {'path': '/nodes/{node_ident}/volume/connectors', 'method': 'GET'},
+ {'path': '/nodes/{node_ident}/volume/targets', 'method': 'GET'}
+ ],
+ deprecated_rule=deprecated_volume_get,
+ deprecated_reason=deprecated_volume_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:volume:list',
+ check_str=API_READER,
+ scope_types=['system', 'project'],
+ description='Retrieve a list of Volume connector and target records',
+ operations=[
+ {'path': '/volume/connectors', 'method': 'GET'},
+ {'path': '/volume/targets', 'method': 'GET'},
+ {'path': '/nodes/{node_ident}/volume/connectors', 'method': 'GET'},
+ {'path': '/nodes/{node_ident}/volume/targets', 'method': 'GET'}
+ ],
+ deprecated_rule=deprecated_volume_get,
+ deprecated_reason=deprecated_volume_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:volume:get',
+ check_str=SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Retrieve Volume connector and target records',
operations=[
{'path': '/volume', 'method': 'GET'},
@@ -1107,8 +1402,8 @@ volume_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:volume:create',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Create Volume connector and target records',
operations=[
{'path': '/volume/connectors', 'method': 'POST'},
@@ -1120,8 +1415,8 @@ volume_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:volume:delete',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Delete Volume connector and target records',
operations=[
{'path': '/volume/connectors/{volume_connector_id}',
@@ -1135,8 +1430,8 @@ volume_policies = [
),
policy.DocumentedRuleDefault(
name='baremetal:volume:update',
- check_str=SYSTEM_MEMBER,
- scope_types=['system'],
+ check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
+ scope_types=['system', 'project'],
description='Update Volume connector and target records',
operations=[
{'path': '/volume/connectors/{volume_connector_id}',
@@ -1148,6 +1443,21 @@ volume_policies = [
deprecated_reason=deprecated_volume_reason,
deprecated_since=versionutils.deprecated.WALLABY
),
+ policy.DocumentedRuleDefault(
+ name='baremetal:volume:view_target_properties',
+ check_str=TARGET_PROPERTIES_READER,
+ scope_types=['system', 'project'],
+ description='Ability to view volume target properties',
+ operations=[
+ {'path': '/volume/connectors/{volume_connector_id}',
+ 'method': 'GET'},
+ {'path': '/volume/targets/{volume_target_id}',
+ 'method': 'GET'}
+ ],
+ deprecated_rule=deprecated_volume_update,
+ deprecated_reason=deprecated_volume_reason,
+ deprecated_since=versionutils.deprecated.WALLABY
+ ),
]
@@ -1491,7 +1801,9 @@ def authorize(rule, target, creds, *args, **kwargs):
try:
return enforcer.authorize(rule, target, creds, do_raise=True,
*args, **kwargs)
- except policy.PolicyNotAuthorized:
+ except policy.PolicyNotAuthorized as e:
+        LOG.error('Rejecting authorization: %(error)s',
+ {'error': e})
raise exception.HTTPForbidden(resource=rule)
@@ -1503,3 +1815,16 @@ def check(rule, target, creds, *args, **kwargs):
"""
enforcer = get_enforcer()
return enforcer.enforce(rule, target, creds, *args, **kwargs)
+
+
+def check_policy(rule, target, creds, *args, **kwargs):
+ """Configuration aware role policy check wrapper.
+
+ Checks authorization of a rule against the target and credentials
+ and returns True or False.
+    Always returns True if CONF.auth_strategy is not keystone.
+ """
+ if CONF.auth_strategy != 'keystone':
+ return True
+ enforcer = get_enforcer()
+ return enforcer.enforce(rule, target, creds, *args, **kwargs)
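
The fine-grained node update rules above pair with the new check_policy()
helper. Below is a minimal sketch of how an API-side caller could pick the
right rule for a patched field; the FIELD_RULES mapping and
may_update_field() helper are hypothetical and not part of this patch, only
the rule names and check_policy() come from the change above.

    from ironic.common import policy

    # Hypothetical mapping from a patched node field to its fine-grained
    # rule; anything without a dedicated rule falls back to the general one.
    FIELD_RULES = {
        'owner': 'baremetal:node:update:owner',
        'lessee': 'baremetal:node:update:lessee',
        'driver_info': 'baremetal:node:update:driver_info',
    }

    def may_update_field(field, target, creds):
        rule = FIELD_RULES.get(field, 'baremetal:node:update')
        # check_policy() short-circuits to True when auth_strategy is not
        # keystone, otherwise defers to the oslo.policy enforcer.
        return policy.check_policy(rule, target, creds)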
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index 2a78f0c3d..b05306a16 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -861,12 +861,7 @@ def get_volume_pxe_options(task):
return prop
return __return_item_or_first_if_list(properties.get(key + 's', ''))
- def __generate_iscsi_url(properties):
- """Returns iscsi url."""
- portal = __get_property(properties, 'target_portal')
- iqn = __get_property(properties, 'target_iqn')
- lun = __get_property(properties, 'target_lun')
-
+ def __format_portal(portal, iqn, lun):
if ':' in portal:
host, port = portal.split(':')
else:
@@ -875,6 +870,20 @@ def get_volume_pxe_options(task):
return ("iscsi:%(host)s::%(port)s:%(lun)s:%(iqn)s" %
{'host': host, 'port': port, 'lun': lun, 'iqn': iqn})
+ def __generate_iscsi_url(properties):
+ """Returns iscsi url."""
+ iqn = __get_property(properties, 'target_iqn')
+ lun = __get_property(properties, 'target_lun')
+ if 'target_portals' in properties:
+ portals = properties.get('target_portals')
+ formatted_portals = []
+ for portal in portals:
+ formatted_portals.append(__format_portal(portal, iqn, lun))
+ return ' '.join(formatted_portals)
+ else:
+ portal = __get_property(properties, 'target_portal')
+ return __format_portal(portal, iqn, lun)
+
pxe_options = {}
node = task.node
boot_volume = node.driver_internal_info.get('boot_from_volume')
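
To illustrate the multipath handling introduced above: each portal is
rendered into the same iPXE-style iscsi URI and, for multipath volumes, the
URIs are joined with spaces. A standalone sketch with illustrative sample
values:

    def format_portal(portal, iqn, lun):
        # Mirrors __format_portal(): split off an explicit port when present.
        host, _, port = portal.partition(':')
        return 'iscsi:%s::%s:%s:%s' % (host, port, lun, iqn)

    iqn, lun = 'iqn.2010-10.org.openstack:volume-1', '1'
    single = format_portal('10.0.0.5:3260', iqn, lun)
    # 'iscsi:10.0.0.5::3260:1:iqn.2010-10.org.openstack:volume-1'
    multipath = ' '.join(format_portal(p, iqn, lun)
                         for p in ('10.0.0.5:3260', '10.0.0.6:3260'))
    # one URI per portal, space separated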
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 3d997d5e2..b6adde974 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -206,6 +206,10 @@ class TaskManager(object):
self.context = context
self._node = None
+ self._ports = None
+ self._portgroups = None
+ self._volume_connectors = None
+ self._volume_targets = None
self.node_id = node_id
self.shared = shared
self._retry = retry
@@ -233,13 +237,6 @@ class TaskManager(object):
self._debug_timer.restart()
self.node = node
- self.ports = objects.Port.list_by_node_id(context, self.node.id)
- self.portgroups = objects.Portgroup.list_by_node_id(context,
- self.node.id)
- self.volume_connectors = objects.VolumeConnector.list_by_node_id(
- context, self.node.id)
- self.volume_targets = objects.VolumeTarget.list_by_node_id(
- context, self.node.id)
if load_driver:
self.driver = driver_factory.build_driver_for_task(self)
else:
@@ -260,6 +257,67 @@ class TaskManager(object):
self.fsm.initialize(start_state=self.node.provision_state,
target_state=self.node.target_provision_state)
+ @property
+ def ports(self):
+ try:
+ if self._ports is None:
+ self._ports = objects.Port.list_by_node_id(self.context,
+ self.node.id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ self._ports = ports
+
+ @property
+ def portgroups(self):
+ try:
+ if self._portgroups is None:
+ self._portgroups = objects.Portgroup.list_by_node_id(
+ self.context, self.node.id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+ return self._portgroups
+
+ @portgroups.setter
+ def portgroups(self, portgroups):
+ self._portgroups = portgroups
+
+ @property
+ def volume_connectors(self):
+ try:
+ if self._volume_connectors is None:
+ self._volume_connectors = \
+ objects.VolumeConnector.list_by_node_id(
+ self.context, self.node.id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+ return self._volume_connectors
+
+ @volume_connectors.setter
+ def volume_connectors(self, volume_connectors):
+ self._volume_connectors = volume_connectors
+
+ @property
+ def volume_targets(self):
+ try:
+ if self._volume_targets is None:
+ self._volume_targets = objects.VolumeTarget.list_by_node_id(
+ self.context, self.node.id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+ return self._volume_targets
+
+ @volume_targets.setter
+ def volume_targets(self, volume_targets):
+ self._volume_targets = volume_targets
+
def load_driver(self):
if self.driver is None:
self.driver = driver_factory.build_driver_for_task(self)
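
The properties above defer the port, portgroup and volume lookups until
first access and release the lock if a load fails, so tasks that never touch
those resources skip four database queries. A minimal standalone sketch of
the same lazy-load shape, with illustrative names:

    class LazyList(object):
        def __init__(self, loader):
            self._loader = loader
            self._items = None

        @property
        def items(self):
            # Loaded on first access only; later reads reuse the cached list.
            if self._items is None:
                self._items = self._loader()
            return self._items

        @items.setter
        def items(self, value):
            self._items = value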
diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py
index cb78edea6..364c64c81 100644
--- a/ironic/conf/ilo.py
+++ b/ironic/conf/ilo.py
@@ -111,6 +111,15 @@ opts = [
'or as the octal number ``0o644`` in Python. '
'This setting must be set to the octal number '
'representation, meaning starting with ``0o``.')),
+ cfg.StrOpt('kernel_append_params',
+ default='nofb nomodeset vga=normal',
+ mutable=True,
+ help=_('Additional kernel parameters to pass down to the '
+ 'instance kernel. These parameters can be consumed by '
+ 'the kernel or by the applications by reading '
+ '/proc/cmdline. Mind severe cmdline size limit! Can be '
+ 'overridden by `instance_info/kernel_append_params` '
+ 'property.')),
]
diff --git a/ironic/conf/redfish.py b/ironic/conf/redfish.py
index ad522865e..eddf3e013 100644
--- a/ironic/conf/redfish.py
+++ b/ironic/conf/redfish.py
@@ -90,6 +90,16 @@ opts = [
default=60,
help=_('Number of seconds to wait between checking for '
'failed firmware update tasks')),
+ cfg.IntOpt('raid_config_status_interval',
+ min=0,
+ default=60,
+ help=_('Number of seconds to wait between checking for '
+ 'completed raid config tasks')),
+ cfg.IntOpt('raid_config_fail_interval',
+ min=0,
+ default=60,
+ help=_('Number of seconds to wait between checking for '
+ 'failed raid config tasks')),
]
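
These intervals follow the pattern of the existing firmware update options:
a periodic task polls for outstanding RAID configuration tasks at the
configured spacing. A hedged sketch of how such an option is typically
consumed; the decorator is the futurist API, while the function name and
body are illustrative only.

    from futurist import periodics

    from ironic.conf import CONF

    @periodics.periodic(spacing=CONF.redfish.raid_config_status_interval)
    def _query_raid_config_status(manager, context):
        """Illustrative body: poll nodes with an in-flight RAID config."""
        pass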
diff --git a/ironic/db/api.py b/ironic/db/api.py
index bee805f5c..d44ea73ef 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -344,10 +344,11 @@ class Connection(object, metaclass=abc.ABCMeta):
"""
@abc.abstractmethod
- def get_portgroup_by_address(self, address):
+ def get_portgroup_by_address(self, address, project=None):
"""Return a network portgroup representation.
:param address: The MAC address of a portgroup.
+ :param project: A node owner or lessee to filter by.
:returns: A portgroup.
:raises: PortgroupNotFound
"""
@@ -363,7 +364,8 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_portgroup_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None,
+ project=None):
"""Return a list of portgroups.
:param limit: Maximum number of portgroups to return.
@@ -372,12 +374,14 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
+ :param project: A node owner or lessee to filter by.
:returns: A list of portgroups.
"""
@abc.abstractmethod
def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None,
+ project=None):
"""List all the portgroups for a given node.
:param node_id: The integer node ID.
@@ -387,6 +391,7 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted
:param sort_dir: Direction in which results should be sorted
(asc, desc)
+ :param project: A node owner or lessee to filter by.
:returns: A list of portgroups.
"""
@@ -709,7 +714,8 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_volume_connector_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None,
+ project=None):
"""Return a list of volume connectors.
:param limit: Maximum number of volume connectors to return.
@@ -718,6 +724,8 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: Direction in which results should be sorted.
(asc, desc)
+    :param project: The associated node project to search with.
     :returns: A list of volume connectors.
:raises: InvalidParameterValue If sort_key does not exist.
"""
@@ -745,7 +753,7 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_volume_connectors_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
- sort_dir=None):
+ sort_dir=None, project=None):
"""List all the volume connectors for a given node.
:param node_id: The integer node ID.
@@ -755,6 +763,8 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted
:param sort_dir: Direction in which results should be sorted
(asc, desc)
+    :param project: The associated node project to search with.
     :returns: A list of volume connectors.
:raises: InvalidParameterValue If sort_key does not exist.
"""
@@ -808,7 +818,8 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_volume_target_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None,
+ project=None):
"""Return a list of volume targets.
:param limit: Maximum number of volume targets to return.
@@ -817,6 +828,8 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted.
:param sort_dir: direction in which results should be sorted.
(asc, desc)
+    :param project: The associated node project to search with.
     :returns: A list of volume targets.
:raises: InvalidParameterValue if sort_key does not exist.
"""
@@ -844,7 +857,7 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_volume_targets_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
- sort_dir=None):
+ sort_dir=None, project=None):
"""List all the volume targets for a given node.
:param node_id: The integer node ID.
@@ -854,6 +867,8 @@ class Connection(object, metaclass=abc.ABCMeta):
:param sort_key: Attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted
(asc, desc)
+    :param project: The associated node project to search with.
     :returns: A list of volume targets.
:raises: InvalidParameterValue if sort_key does not exist.
"""
@@ -861,7 +876,7 @@ class Connection(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_volume_targets_by_volume_id(self, volume_id, limit=None,
marker=None, sort_key=None,
- sort_dir=None):
+ sort_dir=None, project=None):
"""List all the volume targets for a given volume id.
:param volume_id: The UUID of the volume.
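
The net effect of these signature changes is that list calls can be scoped
to a node owner or lessee. A minimal usage sketch; the project value is a
placeholder:

    from ironic.db import api as dbapi

    conn = dbapi.get_instance()
    # Only resources whose node is owned or leased by this project are
    # returned; omitting project preserves the unfiltered behaviour.
    portgroups = conn.get_portgroup_list(project='some-project-id')
    targets = conn.get_volume_target_list(project='some-project-id')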
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 716fd601f..6f38c4b8f 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -162,6 +162,27 @@ def add_port_filter_by_node_project(query, value):
| (models.Node.lessee == value))
+def add_portgroup_filter_by_node_project(query, value):
+ query = query.join(models.Node,
+ models.Portgroup.node_id == models.Node.id)
+ return query.filter((models.Node.owner == value)
+ | (models.Node.lessee == value))
+
+
+def add_volume_conn_filter_by_node_project(query, value):
+ query = query.join(models.Node,
+ models.VolumeConnector.node_id == models.Node.id)
+ return query.filter((models.Node.owner == value)
+ | (models.Node.lessee == value))
+
+
+def add_volume_target_filter_by_node_project(query, value):
+ query = query.join(models.Node,
+ models.VolumeTarget.node_id == models.Node.id)
+ return query.filter((models.Node.owner == value)
+ | (models.Node.lessee == value))
+
+
def add_portgroup_filter(query, value):
"""Adds a portgroup-specific filter to a query.
@@ -796,8 +817,10 @@ class Connection(api.Connection):
if count == 0:
raise exception.PortNotFound(port=port_id)
- def get_portgroup_by_id(self, portgroup_id):
+ def get_portgroup_by_id(self, portgroup_id, project=None):
query = model_query(models.Portgroup).filter_by(id=portgroup_id)
+ if project:
+ query = add_portgroup_filter_by_node_project(query, project)
try:
return query.one()
except NoResultFound:
@@ -810,8 +833,10 @@ class Connection(api.Connection):
except NoResultFound:
raise exception.PortgroupNotFound(portgroup=portgroup_uuid)
- def get_portgroup_by_address(self, address):
+ def get_portgroup_by_address(self, address, project=None):
query = model_query(models.Portgroup).filter_by(address=address)
+ if project:
+ query = add_portgroup_filter_by_node_project(query, project)
try:
return query.one()
except NoResultFound:
@@ -825,14 +850,19 @@ class Connection(api.Connection):
raise exception.PortgroupNotFound(portgroup=name)
def get_portgroup_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
+ query = model_query(models.Portgroup)
+ if project:
+ query = add_portgroup_filter_by_node_project(query, project)
return _paginate_query(models.Portgroup, limit, marker,
- sort_key, sort_dir)
+ sort_key, sort_dir, query)
def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
query = model_query(models.Portgroup)
query = query.filter_by(node_id=node_id)
+ if project:
+ query = add_portgroup_filter_by_node_project(query, project)
return _paginate_query(models.Portgroup, limit, marker,
sort_key, sort_dir, query)
@@ -1219,9 +1249,12 @@ class Connection(api.Connection):
% addresses)
def get_volume_connector_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
+ query = model_query(models.VolumeConnector)
+ if project:
+ query = add_volume_conn_filter_by_node_project(query, project)
return _paginate_query(models.VolumeConnector, limit, marker,
- sort_key, sort_dir)
+ sort_key, sort_dir, query)
def get_volume_connector_by_id(self, db_id):
query = model_query(models.VolumeConnector).filter_by(id=db_id)
@@ -1240,8 +1273,10 @@ class Connection(api.Connection):
def get_volume_connectors_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
- sort_dir=None):
+ sort_dir=None, project=None):
query = model_query(models.VolumeConnector).filter_by(node_id=node_id)
+ if project:
+            query = add_volume_conn_filter_by_node_project(query, project)
return _paginate_query(models.VolumeConnector, limit, marker,
sort_key, sort_dir, query)
@@ -1299,9 +1334,12 @@ class Connection(api.Connection):
raise exception.VolumeConnectorNotFound(connector=ident)
def get_volume_target_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
+ query = model_query(models.VolumeTarget)
+ if project:
+ query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker,
- sort_key, sort_dir)
+ sort_key, sort_dir, query)
def get_volume_target_by_id(self, db_id):
query = model_query(models.VolumeTarget).filter_by(id=db_id)
@@ -1318,15 +1356,20 @@ class Connection(api.Connection):
raise exception.VolumeTargetNotFound(target=uuid)
def get_volume_targets_by_node_id(self, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None,
+ project=None):
query = model_query(models.VolumeTarget).filter_by(node_id=node_id)
+ if project:
+            query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker, sort_key,
sort_dir, query)
def get_volume_targets_by_volume_id(self, volume_id, limit=None,
marker=None, sort_key=None,
- sort_dir=None):
+ sort_dir=None, project=None):
query = model_query(models.VolumeTarget).filter_by(volume_id=volume_id)
+ if project:
+ query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker, sort_key,
sort_dir, query)
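
The three new helpers share one shape: join the resource table to its node
and keep rows whose node owner or lessee matches the project. A standalone
sketch of the equivalent generic filter, assuming the models shown above:

    from sqlalchemy import or_

    from ironic.db.sqlalchemy import models

    def filter_by_node_project(query, model, value):
        # resource -> node join, then owner == value OR lessee == value,
        # matching the add_*_filter_by_node_project helpers above.
        query = query.join(models.Node, model.node_id == models.Node.id)
        return query.filter(or_(models.Node.owner == value,
                                models.Node.lessee == value))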
diff --git a/ironic/drivers/modules/drac/inspect.py b/ironic/drivers/modules/drac/inspect.py
index 77e48226f..3218b3557 100644
--- a/ironic/drivers/modules/drac/inspect.py
+++ b/ironic/drivers/modules/drac/inspect.py
@@ -49,7 +49,8 @@ class DracRedfishInspect(redfish_inspect.RedfishInspect):
class DracWSManInspect(base.InspectInterface):
- _GPU_SUPPORTED_LIST = {"TU104GL [Tesla T4]"}
+ _GPU_SUPPORTED_LIST = {"TU104GL [Tesla T4]",
+ "GV100GL [Tesla V100 PCIe 16GB]"}
def get_properties(self):
"""Return the properties of the interface.
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index 6f4490dbb..a5428ad4f 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -78,7 +78,17 @@ OPTIONAL_PROPERTIES = {
"certificates require to be added to the "
"iLO.")
}
+KERNEL_PARAM_PROPERTIES = {
+ 'ilo_kernel_append_params': _("Additional kernel parameters to pass down "
+ "to instance kernel. These parameters can "
+ "be consumed by the kernel or by the "
+ "applications by reading /proc/cmdline. "
+ "Mind severe cmdline size limit. Overrides "
+ "[ilo]/kernel_append_params ironic option.")
+}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
+VMEDIA_OPTIONAL_PROPERTIES = OPTIONAL_PROPERTIES.copy()
+VMEDIA_OPTIONAL_PROPERTIES.update(KERNEL_PARAM_PROPERTIES)
KERNEL_RAMDISK_LABELS = {
'deploy': REQUIRED_PROPERTIES_UEFI_HTTPS_BOOT,
@@ -127,7 +137,7 @@ def parse_driver_info(node, mode='deploy'):
d_info.update(
{k: info.get(k, getattr(CONF.conductor, k.replace('ilo_', ''), None))
- for k in OPTIONAL_PROPERTIES})
+ for k in VMEDIA_OPTIONAL_PROPERTIES})
d_info.pop('ilo_add_certificates', None)
return d_info
@@ -926,6 +936,10 @@ class IloUefiHttpsBoot(base.BootInterface):
"parameters were missing in node's driver_info") % mode)
deploy_utils.check_for_missing_params(deploy_info, error_msg)
+ deploy_info.update(
+ {k: info.get(k, getattr(CONF.ilo, k.replace('ilo_', ''), None))
+ for k in KERNEL_PARAM_PROPERTIES})
+
deploy_info.update(ilo_common.parse_driver_info(node))
return deploy_info
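
In effect, a per-node driver_info value overrides the new
[ilo]kernel_append_params default. Roughly, as a sketch rather than the
patch's exact lookup:

    from ironic.conf import CONF

    def kernel_params_for(node):
        # driver_info wins; otherwise fall back to the [ilo] config option.
        return node.driver_info.get('ilo_kernel_append_params',
                                    CONF.ilo.kernel_append_params)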
diff --git a/ironic/drivers/modules/image_utils.py b/ironic/drivers/modules/image_utils.py
index fb61a8d1d..786e9bb4c 100644
--- a/ironic/drivers/modules/image_utils.py
+++ b/ironic/drivers/modules/image_utils.py
@@ -39,55 +39,53 @@ LOG = log.getLogger(__name__)
class ImageHandler(object):
- _SWIFT_MAP = {
- "redfish": {
- "swift_enabled": CONF.redfish.use_swift,
- "container": CONF.redfish.swift_container,
- "timeout": CONF.redfish.swift_object_expiry_timeout,
- "image_subdir": "redfish",
- "file_permission": CONF.redfish.file_permission,
- "kernel_params": CONF.redfish.kernel_append_params
- },
- "idrac": {
- "swift_enabled": CONF.redfish.use_swift,
- "container": CONF.redfish.swift_container,
- "timeout": CONF.redfish.swift_object_expiry_timeout,
- "image_subdir": "redfish",
- "file_permission": CONF.redfish.file_permission,
- "kernel_params": CONF.redfish.kernel_append_params
- },
- "ilo5": {
- "swift_enabled": not CONF.ilo.use_web_server_for_images,
- "container": CONF.ilo.swift_ilo_container,
- "timeout": CONF.ilo.swift_object_expiry_timeout,
- "image_subdir": "ilo",
- "file_permission": CONF.ilo.file_permission,
- "kernel_params": CONF.pxe.pxe_append_params
- },
- "ilo": {
- "swift_enabled": not CONF.ilo.use_web_server_for_images,
- "container": CONF.ilo.swift_ilo_container,
- "timeout": CONF.ilo.swift_object_expiry_timeout,
- "image_subdir": "ilo",
- "file_permission": CONF.ilo.file_permission,
- "kernel_params": CONF.pxe.pxe_append_params
- },
- }
-
def __init__(self, driver):
+ self.update_driver_config(driver)
+
+ def update_driver_config(self, driver):
+ _SWIFT_MAP = {
+ "redfish": {
+ "swift_enabled": CONF.redfish.use_swift,
+ "container": CONF.redfish.swift_container,
+ "timeout": CONF.redfish.swift_object_expiry_timeout,
+ "image_subdir": "redfish",
+ "file_permission": CONF.redfish.file_permission,
+ "kernel_params": CONF.redfish.kernel_append_params
+ },
+ "idrac": {
+ "swift_enabled": CONF.redfish.use_swift,
+ "container": CONF.redfish.swift_container,
+ "timeout": CONF.redfish.swift_object_expiry_timeout,
+ "image_subdir": "redfish",
+ "file_permission": CONF.redfish.file_permission,
+ "kernel_params": CONF.redfish.kernel_append_params
+ },
+ "ilo5": {
+ "swift_enabled": not CONF.ilo.use_web_server_for_images,
+ "container": CONF.ilo.swift_ilo_container,
+ "timeout": CONF.ilo.swift_object_expiry_timeout,
+ "image_subdir": "ilo",
+ "file_permission": CONF.ilo.file_permission,
+ "kernel_params": CONF.ilo.kernel_append_params
+ },
+ "ilo": {
+ "swift_enabled": not CONF.ilo.use_web_server_for_images,
+ "container": CONF.ilo.swift_ilo_container,
+ "timeout": CONF.ilo.swift_object_expiry_timeout,
+ "image_subdir": "ilo",
+ "file_permission": CONF.ilo.file_permission,
+ "kernel_params": CONF.ilo.kernel_append_params
+ },
+ }
+
self._driver = driver
- self._container = self._SWIFT_MAP[driver].get("container")
- self._timeout = self._SWIFT_MAP[driver].get("timeout")
- self._image_subdir = self._SWIFT_MAP[driver].get("image_subdir")
- self._file_permission = self._SWIFT_MAP[driver].get("file_permission")
+ self.swift_enabled = _SWIFT_MAP[driver].get("swift_enabled")
+ self._container = _SWIFT_MAP[driver].get("container")
+ self._timeout = _SWIFT_MAP[driver].get("timeout")
+ self._image_subdir = _SWIFT_MAP[driver].get("image_subdir")
+ self._file_permission = _SWIFT_MAP[driver].get("file_permission")
# To get the kernel parameters
- self.kernel_params = self._SWIFT_MAP[driver].get("kernel_params")
-
- def _is_swift_enabled(self):
- try:
- return self._SWIFT_MAP[self._driver].get("swift_enabled")
- except KeyError:
- return False
+ self.kernel_params = _SWIFT_MAP[driver].get("kernel_params")
def unpublish_image(self, object_name):
"""Withdraw the image previously made downloadable.
@@ -98,7 +96,7 @@ class ImageHandler(object):
:param object_name: name of the published file (optional)
"""
- if self._is_swift_enabled():
+ if self.swift_enabled:
container = self._container
swift_api = swift.SwiftAPI()
@@ -179,7 +177,7 @@ class ImageHandler(object):
:return: a URL to download published file
"""
- if self._is_swift_enabled():
+ if self.swift_enabled:
container = self._container
timeout = self._timeout
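
Moving _SWIFT_MAP into update_driver_config() means the options are read
when a handler is built rather than once at class definition, so mutable
settings such as the new kernel_append_params take effect without a restart.
A minimal sketch of the difference, with illustrative class names:

    from ironic.conf import CONF

    class ImportTimeConfig(object):
        # Evaluated once, when the module is imported; later changes to a
        # mutable option are never seen.
        KERNEL_PARAMS = CONF.ilo.kernel_append_params

    class CallTimeConfig(object):
        def __init__(self):
            # Evaluated per instance, so an updated mutable option is
            # picked up by the next handler that is built.
            self.kernel_params = CONF.ilo.kernel_append_params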
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
new file mode 100644
index 000000000..7e14735e2
--- /dev/null
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -0,0 +1,1119 @@
+# Copyright 2021 DMTF. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import math
+
+from futurist import periodics
+from ironic_lib import metrics_utils
+from oslo_log import log
+from oslo_utils import importutils
+from oslo_utils import units
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.conductor import utils as manager_utils
+from ironic.conf import CONF
+from ironic.drivers import base
+from ironic.drivers.modules import deploy_utils
+from ironic.drivers.modules.redfish import utils as redfish_utils
+
+LOG = log.getLogger(__name__)
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+
+# TODO(billdodd): double-check all these values
+RAID_LEVELS = {
+ '0': {
+ 'min_disks': 1,
+ 'max_disks': 1000,
+ 'type': 'simple',
+ 'volume_type': 'NonRedundant',
+ 'raid_type': 'RAID0',
+ 'overhead': 0
+ },
+ '1': {
+ 'min_disks': 2,
+ 'max_disks': 2,
+ 'type': 'simple',
+ 'volume_type': 'Mirrored',
+ 'raid_type': 'RAID1',
+ 'overhead': 1
+ },
+ '5': {
+ 'min_disks': 3,
+ 'max_disks': 1000,
+ 'type': 'simple',
+ 'volume_type': 'StripedWithParity',
+ 'raid_type': 'RAID5',
+ 'overhead': 1
+ },
+ '6': {
+ 'min_disks': 4,
+ 'max_disks': 1000,
+ 'type': 'simple',
+ 'volume_type': 'StripedWithParity',
+ 'raid_type': 'RAID6',
+ 'overhead': 2
+ },
+ '1+0': {
+ 'type': 'spanned',
+ 'volume_type': 'SpannedMirrors',
+ 'raid_type': 'RAID10',
+ 'span_type': '1'
+ },
+ '5+0': {
+ 'type': 'spanned',
+ 'volume_type': 'SpannedStripesWithParity',
+ 'raid_type': 'RAID50',
+ 'span_type': '5'
+ },
+ '6+0': {
+ 'type': 'spanned',
+ 'volume_type': 'SpannedStripesWithParity',
+ 'raid_type': 'RAID60',
+ 'span_type': '6'
+ }
+}
+
+sushy = importutils.try_import('sushy')
+
+
+def convert_drive_units(logical_disks, node):
+ """Convert size in logical_disks from gb to bytes"""
+ for disk in logical_disks:
+ if disk['size_gb'] == 'MAX' and 'physical_disks' not in disk:
+ raise exception.InvalidParameterValue(
+ _("create_configuration called with invalid "
+ "target_raid_configuration for node %(node_id)s. "
+ "'physical_disks' is missing from logical_disk while "
+ "'size_gb'='MAX' was requested: "
+ "%(logical_disk)s") % {'node_id': node.uuid,
+ 'logical_disk': disk})
+
+ if disk['size_gb'] == 'MAX':
+ disk['size_bytes'] = 'MAX'
+ else:
+ disk['size_bytes'] = disk['size_gb'] * units.Gi
+
+ del disk['size_gb']
+
+
+def get_physical_disks(node):
+ """Get the physical drives of the node.
+
+ :param node: an ironic node object.
+ :returns: a list of Drive objects from sushy
+ :raises: RedfishConnectionError when it fails to connect to Redfish
+ :raises: RedfishError if there is an error getting the drives via Redfish
+ """
+ system = redfish_utils.get_system(node)
+
+ disks = []
+ disk_to_controller = {}
+ try:
+ collection = system.storage
+ for storage in collection.get_members():
+ disks.extend(storage.drives)
+ controller = (storage.storage_controllers[0]
+ if storage.storage_controllers else None)
+ for drive in storage.drives:
+ disk_to_controller[drive] = controller
+ except sushy.exceptions.SushyError as exc:
+ error_msg = _('Cannot get the list of physical disks for node '
+ '%(node_uuid)s. Reason: %(error)s.' %
+ {'node_uuid': node.uuid, 'error': exc})
+ LOG.error(error_msg)
+ raise exception.RedfishError(error=exc)
+ return disks, disk_to_controller
+
+
+def _raise_raid_level_not_supported(raid_level):
+ """Helper function for the 'RAID level is not supported' error
+
+ :param raid_level: RAID level of the virtual disk
+ :raises: exception.RedfishError
+ """
+ reason = (_('RAID level %(raid_level)s is not supported by the '
+ 'driver. Supported RAID levels: %(supported_raid_levels)s')
+ % {'raid_level': raid_level,
+ 'supported_raid_levels': ', '.join(RAID_LEVELS)})
+ raise exception.RedfishError(error=reason)
+
+
+def _raid_level_overhead(raid_level, spans_count=1):
+ """Calculate the drive overhead for the given RAID level
+
+    Drive overhead is the number of additional drives required to hold
+    the redundant data needed for mirrored volumes and the parity
+    checksums for volumes with parity.
+
+ :param raid_level: RAID level of the virtual disk
+ :param spans_count: number of spans for the virtual disk
+ :return: the number of drives of overhead
+ :raises: RedfishError if RAID level is not supported
+ """
+ try:
+ raid_level_info = RAID_LEVELS[raid_level]
+ except KeyError:
+ _raise_raid_level_not_supported(raid_level)
+
+ if raid_level_info['type'] == 'spanned':
+ if spans_count <= 1:
+ reason = _('Spanned RAID volumes cannot contain a single span')
+ raise exception.RedfishError(error=reason)
+
+ span_type = raid_level_info['span_type']
+ raid_level_info = RAID_LEVELS[span_type]
+
+ return raid_level_info['overhead'] * spans_count
+
+
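
A worked illustration of the overhead math, using the values from
RAID_LEVELS above:

    # _raid_level_overhead('5')       -> 1  (one parity drive, one span)
    # _raid_level_overhead('1+0', 2)  -> 2  (spanned RAID1: one mirror
    #                                        drive per span, two spans)
    # _raid_level_overhead('6+0', 2)  -> 4  (spanned RAID6: two parity
    #                                        drives per span, two spans)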
+def _max_volume_size_bytes(raid_level, physical_disks, free_space_bytes,
+ spans_count=1, stripe_size_kb=64 * units.Ki):
+ # restrict the size to the smallest available space
+ free_spaces = [free_space_bytes[disk] for disk in physical_disks]
+ size_kb = min(free_spaces) // units.Ki
+
+ # NOTE(ifarkas): using math.floor so we get a volume size that does not
+ # exceed the available space
+ stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb))
+
+ disks_count = len(physical_disks)
+ overhead_disks_count = _raid_level_overhead(raid_level, spans_count)
+ if disks_count <= overhead_disks_count:
+        reason = _('The number of physical drives (%(drives)s) is too small '
+                   'for the required number of overhead drives '
+                   '(%(overhead)s)' %
+                   {'drives': disks_count, 'overhead': overhead_disks_count})
+ raise exception.RedfishError(error=reason)
+
+ max_volume_size_bytes = int(
+ stripes_per_disk * stripe_size_kb
+ * (disks_count - overhead_disks_count) * units.Ki)
+ return max_volume_size_bytes
+
+
+def _volume_usage_per_disk_bytes(logical_disk, physical_disks, spans_count=1,
+ stripe_size_kb=64 * units.Ki):
+ disks_count = len(physical_disks)
+ overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'],
+ spans_count)
+ volume_size_kb = logical_disk['size_bytes'] // units.Ki
+ # NOTE(ifarkas): using math.ceil so we get the largest disk usage
+ # possible, so we can avoid over-committing
+ stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb)
+
+ stripes_per_disk = math.ceil(
+ float(stripes_per_volume) / (disks_count - overhead_disks_count))
+ volume_usage_per_disk_bytes = int(
+ stripes_per_disk * stripe_size_kb * units.Ki)
+ return volume_usage_per_disk_bytes
+
+
+def _calculate_spans(raid_level, disks_count):
+ """Calculates number of spans for a RAID level given a physical disk count
+
+ :param raid_level: RAID level of the virtual disk.
+ :param disks_count: number of physical disks used for the virtual disk.
+ :returns: number of spans.
+ """
+ if raid_level in ['0', '1', '5', '6']:
+ return 1
+ elif raid_level in ['5+0', '6+0']:
+ return 2
+ elif raid_level in ['1+0']:
+ return disks_count >> 1
+ else:
+ reason = (_('Cannot calculate spans for RAID level "%s"') %
+ raid_level)
+ raise exception.RedfishError(error=reason)
+
+
+def _calculate_volume_props(logical_disk, physical_disks, free_space_bytes,
+ disk_to_controller):
+ """Calculate specific properties of the volume and update logical_disk dict
+
+ Calculates various properties like span_depth and span_length for the
+ logical disk to be created. Converts the size_gb property to size_bytes for
+ use by sushy. Also performs checks to be sure the amount of physical space
+ required for the logical disk is available.
+
+ :param logical_disk: properties of the logical disk to create as
+ specified by the operator.
+ :param physical_disks: list of drives available on the node.
+ :param free_space_bytes: dict mapping drives to their available space.
+ :param disk_to_controller: dict mapping drives to their controller.
+ :raises: RedfishError if physical drives cannot fulfill the logical disk.
+ """
+ # TODO(billdodd): match e.g. {'size': '> 100'} -> oslo_utils.specs_matcher
+ selected_disks = [disk for disk in physical_disks
+ if disk.identity in logical_disk['physical_disks']]
+
+ spans_count = _calculate_spans(
+ logical_disk['raid_level'], len(selected_disks))
+
+ if spans_count == 0 or len(selected_disks) % spans_count != 0:
+ error_msg = _('For RAID level %(raid_level)s, the number of physical '
+ 'disks provided (%(num_disks)s) must be a multiple of '
+ 'the spans count (%(spans_count)s)' %
+ {'raid_level': logical_disk['raid_level'],
+ 'num_disks': len(selected_disks),
+ 'spans_count': spans_count})
+ raise exception.RedfishError(error=error_msg)
+
+ disks_per_span = len(selected_disks) / spans_count
+
+ # TODO(billdodd): confirm this?
+ # Best practice is to not pass span_length and span_depth when creating a
+ # RAID10. Redfish will dynamically calculate these values using maximum
+ # values obtained from the RAID controller.
+ logical_disk['span_depth'] = None
+ logical_disk['span_length'] = None
+ if logical_disk['raid_level'] != '1+0':
+ logical_disk['span_depth'] = spans_count
+ logical_disk['span_length'] = disks_per_span
+
+ max_volume_size_bytes = _max_volume_size_bytes(
+ logical_disk['raid_level'], selected_disks, free_space_bytes,
+ spans_count=spans_count)
+
+ if logical_disk['size_bytes'] == 'MAX':
+ if max_volume_size_bytes == 0:
+ error_msg = _("size set to 'MAX' but could not allocate physical "
+ "disk space")
+ raise exception.RedfishError(error=error_msg)
+
+ logical_disk['size_bytes'] = max_volume_size_bytes
+ elif max_volume_size_bytes < logical_disk['size_bytes']:
+ error_msg = _('The physical disk space (%(max_vol_size)s bytes) is '
+ 'not enough for the size of the logical disk '
+ '(%(logical_size)s bytes)' %
+ {'max_vol_size': max_volume_size_bytes,
+ 'logical_size': logical_disk['size_bytes']})
+ raise exception.RedfishError(error=error_msg)
+
+ disk_usage = _volume_usage_per_disk_bytes(logical_disk, selected_disks,
+ spans_count=spans_count)
+
+ for disk in selected_disks:
+ if free_space_bytes[disk] < disk_usage:
+ error_msg = _('The free space of a disk (%(free_space)s bytes) '
+ 'is not enough for the per-disk size of the logical '
+ 'disk (%(disk_usage)s bytes)' %
+ {'free_space': free_space_bytes[disk],
+ 'disk_usage': disk_usage})
+ raise exception.RedfishError(error=error_msg)
+ else:
+ free_space_bytes[disk] -= disk_usage
+
+ if 'controller' not in logical_disk:
+ controller = disk_to_controller[selected_disks[0]]
+ if controller and controller.identifiers:
+ durable_name = controller.identifiers[0].durable_name
+ logical_disk['controller'] = durable_name
+
+
+def _raid_level_min_disks(raid_level, spans_count=1):
+ try:
+ raid_level_info = RAID_LEVELS[raid_level]
+ except KeyError:
+ _raise_raid_level_not_supported(raid_level)
+
+ if raid_level_info['type'] == 'spanned':
+ if spans_count <= 1:
+ reason = _('Spanned RAID volumes cannot contain a single span')
+ raise exception.RedfishError(error=reason)
+
+ span_type = raid_level_info['span_type']
+ raid_level_info = RAID_LEVELS[span_type]
+
+ return raid_level_info['min_disks'] * spans_count
+
+
+def _raid_level_max_disks(raid_level, spans_count=1):
+ try:
+ raid_level_info = RAID_LEVELS[raid_level]
+ except KeyError:
+ _raise_raid_level_not_supported(raid_level)
+
+ if raid_level_info['type'] == 'spanned':
+ if spans_count <= 1:
+ reason = _('Spanned RAID volumes cannot contain a single span')
+ raise exception.RedfishError(error=reason)
+
+ span_type = raid_level_info['span_type']
+ raid_level_info = RAID_LEVELS[span_type]
+
+ return raid_level_info['max_disks'] * spans_count
+
+
+def _usable_disks_count(raid_level, disks_count):
+ """Calculates the number of disks usable for a RAID level
+
+ ...given a physical disk count
+
+ :param raid_level: RAID level of the virtual disk.
+ :param disks_count: number of physical disks used for the virtual disk.
+ :returns: number of disks.
+ :raises: RedfishError if RAID level is not supported.
+ """
+ if raid_level in ['0', '1', '5', '6']:
+ return disks_count
+ elif raid_level in ['5+0', '6+0', '1+0']:
+        # largest even number not exceeding disks_count
+ return (disks_count >> 1) << 1
+ else:
+ _raise_raid_level_not_supported(raid_level)
+
+
+def _assign_disks_to_volume(logical_disks, physical_disks_by_type,
+ free_space_bytes, disk_to_controller):
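+    # Pop the first logical disk off the queue, try every compatible group
+    # of physical disks and disk count for it, and recurse to place the
+    # remaining logical disks, backtracking when a combination does not fit.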
+ logical_disk = logical_disks.pop(0)
+ raid_level = logical_disk['raid_level']
+
+ # iterate over all possible configurations
+ for (disk_type,
+ protocol, size_bytes), disks in physical_disks_by_type.items():
+ if ('disk_type' in logical_disk
+ and logical_disk['disk_type'].lower() != disk_type.lower()):
+ continue
+ if ('interface_type' in logical_disk
+ and logical_disk['interface_type'].lower()
+ != protocol.lower()):
+ continue
+
+ # filter out disks without free disk space
+ disks = [disk for disk in disks if free_space_bytes[disk] > 0]
+
+        # sort disks by free size, which is important if we have a max disks
+        # limit on a volume
+ disks = sorted(
+ disks,
+ key=lambda disk: free_space_bytes[disk])
+
+ # filter out disks already in use if sharing is disabled
+ if ('share_physical_disks' not in logical_disk
+ or not logical_disk['share_physical_disks']):
+ disks = [disk for disk in disks
+ if disk.capacity_bytes == free_space_bytes[disk]]
+
+ max_spans = _calculate_spans(raid_level, len(disks))
+ min_spans = min([2, max_spans])
+ min_disks = _raid_level_min_disks(raid_level,
+ spans_count=min_spans)
+ max_disks = _raid_level_max_disks(raid_level,
+ spans_count=max_spans)
+ candidate_max_disks = min([max_disks, len(disks)])
+
+ for disks_count in range(min_disks, candidate_max_disks + 1):
+ if ('number_of_physical_disks' in logical_disk
+ and logical_disk[
+ 'number_of_physical_disks'] != disks_count):
+ continue
+
+ # skip invalid disks_count
+ if disks_count != _usable_disks_count(logical_disk['raid_level'],
+ disks_count):
+ continue
+
+ selected_disks = disks[0:disks_count]
+
+ candidate_volume = logical_disk.copy()
+ candidate_free_space_bytes = free_space_bytes.copy()
+ candidate_volume['physical_disks'] = [disk.identity for disk
+ in selected_disks]
+ try:
+ _calculate_volume_props(candidate_volume, selected_disks,
+ candidate_free_space_bytes,
+ disk_to_controller)
+ except exception.RedfishError as exc:
+ LOG.debug('Caught RedfishError in _calculate_volume_props(). '
+ 'Reason: %s', exc)
+ continue
+
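+            # More logical disks remain: recurse to place them with the
+            # reduced free space; if that fails, move on to the next disk
+            # count or disk group.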
+ if len(logical_disks) > 0:
+ try:
+ result, candidate_free_space_bytes = (
+ _assign_disks_to_volume(logical_disks,
+ physical_disks_by_type,
+ candidate_free_space_bytes,
+ disk_to_controller))
+ except exception.RedfishError as exc:
+ LOG.debug('Caught RedfishError in '
+ '_assign_disks_to_volume(). Reason: %s', exc)
+ continue
+ if result:
+ logical_disks.append(candidate_volume)
+ return True, candidate_free_space_bytes
+ else:
+ logical_disks.append(candidate_volume)
+ return True, candidate_free_space_bytes
+ else:
+        # put the logical_disk back into the queue
+ logical_disks.insert(0, logical_disk)
+ return False, free_space_bytes
+
+
+def _find_configuration(logical_disks, physical_disks, disk_to_controller):
+ """Find RAID configuration.
+
+    This method transforms the RAID configuration defined in Ironic into a
+    format required by sushy. This includes matching physical disks to RAID
+    volumes when they are not pre-defined and, more generally, calculating
+    any missing properties.
+ """
+
+    # Shared physical disks of RAID volumes with size_bytes='MAX' should be
+ # de-prioritized during the matching process to reserve as much space as
+ # possible. Reserved means it won't be used during matching.
+ volumes_with_reserved_physical_disks = [
+ volume for volume in logical_disks
+ if ('physical_disks' in volume and volume['size_bytes'] == 'MAX'
+ and volume.get('share_physical_disks', False))]
+ reserved_physical_disks = [
+ disk for disk in physical_disks
+ for volume in volumes_with_reserved_physical_disks
+ if disk.identity in volume['physical_disks']]
+
+    # We require each logical disk to contain only homogeneous physical
+    # disks, so sort them by type
+ physical_disks_by_type = {}
+ reserved_physical_disks_by_type = {}
+ free_space_bytes = {}
+ for disk in physical_disks:
+ # calculate free disk space
+ # NOTE(billdodd): This won't be true if part of the drive is being used
+ # by an existing Volume, but has some space available for new
+ # Volumes. Redfish and/or SNIA may address this case in future.
+ free_space_bytes[disk] = disk.capacity_bytes
+
+ disk_type = (disk.media_type, disk.protocol, disk.capacity_bytes)
+ if disk_type not in physical_disks_by_type:
+ physical_disks_by_type[disk_type] = []
+ reserved_physical_disks_by_type[disk_type] = []
+
+ if disk in reserved_physical_disks:
+ reserved_physical_disks_by_type[disk_type].append(disk)
+ else:
+ physical_disks_by_type[disk_type].append(disk)
+
+ # exclude non-shared physical disks (predefined by the user) from
+ # physical_disks_by_type because they are not going to be used during
+ # matching
+ for volume in logical_disks:
+ if ('physical_disks' in volume
+ and not volume.get('share_physical_disks', False)):
+ for disk in physical_disks:
+ if disk.identity in volume['physical_disks']:
+ disk_type = (disk.media_type, disk.protocol,
+ disk.capacity_bytes)
+ if disk in physical_disks_by_type[disk_type]:
+ physical_disks_by_type[disk_type].remove(disk)
+
+ processed_volumes = []
+
+ # step 1 - process volumes with predefined disks and exact size
+ for volume in [volume for volume in logical_disks
+ if ('physical_disks' in volume
+ and volume['size_bytes'] != 'MAX')]:
+ _calculate_volume_props(volume, physical_disks, free_space_bytes,
+ disk_to_controller)
+ processed_volumes.append(volume)
+
+ # step 2 - process volumes without predefined disks
+ volumes_without_disks = [disk for disk in logical_disks
+ if 'physical_disks' not in disk]
+
+ if volumes_without_disks:
+ result, free_space_bytes = (
+ _assign_disks_to_volume(volumes_without_disks,
+ physical_disks_by_type, free_space_bytes,
+ disk_to_controller))
+ if not result:
+ # try again using the reserved physical disks in addition
+ for disk_type, disks in physical_disks_by_type.items():
+ physical_disks_by_type[disk_type] += (
+ reserved_physical_disks_by_type[disk_type])
+
+ result, free_space_bytes = (
+ _assign_disks_to_volume(volumes_without_disks,
+ physical_disks_by_type,
+ free_space_bytes,
+ disk_to_controller))
+ if not result:
+ error_msg = _('failed to find matching physical disks for all '
+ 'logical disks')
+ LOG.error('Redfish driver failed to create RAID '
+ 'configuration. Reason: %(error)s.',
+ {'error': error_msg})
+ raise exception.RedfishError(error=error_msg)
+
+ processed_volumes += volumes_without_disks
+
+ # step 3 - process volumes with predefined disks and size_bytes == 'MAX'
+ for volume in [volume for volume in logical_disks
+ if ('physical_disks' in volume
+ and volume['size_bytes'] == 'MAX')]:
+ _calculate_volume_props(volume, physical_disks, free_space_bytes,
+ disk_to_controller)
+ processed_volumes.append(volume)
+
+ return processed_volumes
+
+
+def _filter_logical_disks(logical_disks, include_root_volume,
+ include_nonroot_volumes):
+ filtered_disks = []
+ for disk in logical_disks:
+ if include_root_volume and disk.get('is_root_volume'):
+ filtered_disks.append(disk)
+
+ if include_nonroot_volumes and not disk.get('is_root_volume'):
+ filtered_disks.append(disk)
+
+ return filtered_disks
+
+
+def _get_storage_controller(node, system, physical_disks):
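+    # Find the storage resource whose drives include any of the requested
+    # physical disks.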
+ collection = system.storage
+ for storage in collection.get_members():
+ for drive in storage.drives:
+ if drive.identity in physical_disks:
+ return storage
+
+
+def _drive_path(storage, drive_id):
+ for drive in storage.drives:
+ if drive.identity == drive_id:
+ return drive._path
+
+
+def _construct_volume_payload(
+ node, storage, raid_controller, physical_disks, raid_level, size_bytes,
+ disk_name=None, span_length=None, span_depth=None):
+ payload = {'Encrypted': False,
+ 'VolumeType': RAID_LEVELS[raid_level]['volume_type'],
+ 'RAIDType': RAID_LEVELS[raid_level]['raid_type'],
+ 'CapacityBytes': size_bytes}
+ if physical_disks:
+ payload['Links'] = {
+ "Drives": [{"@odata.id": _drive_path(storage, d)} for d in
+ physical_disks]
+ }
+    LOG.debug('Payload for RAID logical disk creation on node %(node_uuid)s: '
+              '%(payload)r', {'node_uuid': node.uuid, 'payload': payload})
+ return payload
+
+
+def create_virtual_disk(task, raid_controller, physical_disks, raid_level,
+ size_bytes, disk_name=None, span_length=None,
+ span_depth=None, error_handler=None):
+ """Create a single virtual disk on a RAID controller.
+
+ :param task: TaskManager object containing the node.
+ :param raid_controller: id of the RAID controller.
+ :param physical_disks: ids of the physical disks.
+ :param raid_level: RAID level of the virtual disk.
+ :param size_bytes: size of the virtual disk.
+ :param disk_name: name of the virtual disk. (optional)
+ :param span_depth: Number of spans in virtual disk. (optional)
+ :param span_length: Number of disks per span. (optional)
+ :param error_handler: function to call if volume create fails. (optional)
+ :returns: Newly created Volume resource or TaskMonitor if async task.
+ :raises: RedfishConnectionError when it fails to connect to Redfish.
+ :raises: RedfishError if there is an error creating the virtual disk.
+ """
+ node = task.node
+ system = redfish_utils.get_system(node)
+ storage = _get_storage_controller(node, system, physical_disks)
+ if not storage:
+ reason = _('No storage controller found for node %(node_uuid)s' %
+ {'node_uuid': node.uuid})
+ raise exception.RedfishError(error=reason)
+ volume_collection = storage.volumes
+
+ apply_time = None
+ apply_time_support = volume_collection.operation_apply_time_support
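+    # Prefer applying the new volume immediately; otherwise fall back to
+    # applying it on the next reset, which requires a reboot.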
+ if apply_time_support and apply_time_support.mapped_supported_values:
+ supported_values = apply_time_support.mapped_supported_values
+ if sushy.APPLY_TIME_IMMEDIATE in supported_values:
+ apply_time = sushy.APPLY_TIME_IMMEDIATE
+ elif sushy.APPLY_TIME_ON_RESET in supported_values:
+ apply_time = sushy.APPLY_TIME_ON_RESET
+ payload = _construct_volume_payload(
+ node, storage, raid_controller, physical_disks, raid_level, size_bytes,
+ disk_name=disk_name, span_length=span_length, span_depth=span_depth)
+
+ try:
+ return volume_collection.create(payload, apply_time=apply_time)
+ except sushy.exceptions.SushyError as exc:
+ msg = ('Redfish driver failed to create virtual disk for node '
+ '%(node_uuid)s. Reason: %(error)s.')
+ if error_handler:
+ try:
+ return error_handler(task, exc, volume_collection, payload)
+ except sushy.exceptions.SushyError as exc:
+ LOG.error(msg, {'node_uuid': node.uuid, 'error': exc})
+ raise exception.RedfishError(error=exc)
+ LOG.error(msg, {'node_uuid': node.uuid, 'error': exc})
+ raise exception.RedfishError(error=exc)
+
+
+class RedfishRAID(base.RAIDInterface):
+
+ def __init__(self):
+ super(RedfishRAID, self).__init__()
+ if sushy is None:
+ raise exception.DriverLoadError(
+ driver='redfish',
+ reason=_("Unable to import the sushy library"))
+
+ def get_properties(self):
+ """Return the properties of the interface.
+
+ :returns: dictionary of <property name>:<property description> entries.
+ """
+ return redfish_utils.COMMON_PROPERTIES.copy()
+
+ @base.deploy_step(priority=0,
+ argsinfo=base.RAID_APPLY_CONFIGURATION_ARGSINFO)
+ def apply_configuration(self, task, raid_config, create_root_volume=True,
+ create_nonroot_volumes=False,
+ delete_existing=False):
+ return super(RedfishRAID, self).apply_configuration(
+ task, raid_config, create_root_volume=create_root_volume,
+ create_nonroot_volumes=create_nonroot_volumes,
+ delete_existing=delete_existing)
+
+ @base.clean_step(priority=0, abortable=False, argsinfo={
+ 'create_root_volume': {
+ 'description': (
+ 'This specifies whether to create the root volume. '
+ 'Defaults to `True`.'
+ ),
+ 'required': False
+ },
+ 'create_nonroot_volumes': {
+ 'description': (
+ 'This specifies whether to create the non-root volumes. '
+ 'Defaults to `True`.'
+ ),
+ 'required': False
+ },
+ 'delete_existing': {
+ 'description': (
+ 'Setting this to `True` indicates to delete existing RAID '
+ 'configuration prior to creating the new configuration. '
+ 'Default value is `False`.'
+ ),
+ 'required': False,
+ }
+ })
+ def create_configuration(self, task, create_root_volume=True,
+ create_nonroot_volumes=True,
+ delete_existing=False):
+ """Create RAID configuration on the node.
+
+ This method creates the RAID configuration as read from
+ node.target_raid_config. This method
+ by default will create all logical disks.
+
+ :param task: TaskManager object containing the node.
+ :param create_root_volume: Setting this to False indicates
+ not to create root volume that is specified in the node's
+ target_raid_config. Default value is True.
+ :param create_nonroot_volumes: Setting this to False indicates
+ not to create non-root volumes (all except the root volume) in
+ the node's target_raid_config. Default value is True.
+ :param delete_existing: Setting this to True indicates to delete RAID
+ configuration prior to creating the new configuration. Default is
+ False.
+ :returns: states.CLEANWAIT if RAID configuration is in progress
+ asynchronously or None if it is complete.
+ :raises: RedfishError if there is an error creating the configuration
+ """
+ node = task.node
+
+ logical_disks = node.target_raid_config['logical_disks']
+ convert_drive_units(logical_disks, node)
+ physical_disks, disk_to_controller = get_physical_disks(node)
+ # TODO(billdodd): filter out physical disks that are already in use?
+ # filter out disks with HotSpareType != "None"?
+ logical_disks = _find_configuration(logical_disks, physical_disks,
+ disk_to_controller)
+
+ logical_disks_to_create = _filter_logical_disks(
+ logical_disks, create_root_volume, create_nonroot_volumes)
+
+ self.pre_create_configuration(task, logical_disks_to_create)
+
+ reboot_required = False
+ raid_configs = list()
+ for logical_disk in logical_disks_to_create:
+ raid_config = dict()
+ response = create_virtual_disk(
+ task,
+ raid_controller=logical_disk.get('controller'),
+ physical_disks=logical_disk['physical_disks'],
+ raid_level=logical_disk['raid_level'],
+ size_bytes=logical_disk['size_bytes'],
+ disk_name=logical_disk.get('name'),
+ span_length=logical_disk.get('span_length'),
+ span_depth=logical_disk.get('span_depth'),
+ error_handler=self.volume_create_error_handler)
+ # only save the async tasks (task_monitors) in raid_config
+ if (response is not None
+ and hasattr(response, 'task_monitor_uri')):
+ raid_config['operation'] = 'create'
+ raid_config['raid_controller'] = logical_disk.get(
+ 'controller')
+ raid_config['task_monitor_uri'] = response.task_monitor_uri
+ reboot_required = True
+ raid_configs.append(raid_config)
+
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['raid_configs'] = raid_configs
+ node.driver_internal_info = driver_internal_info
+
+ return_state = None
+ deploy_utils.set_async_step_flags(
+ node,
+ reboot=reboot_required,
+ skip_current_step=True,
+ polling=True)
+ if reboot_required:
+ return_state = deploy_utils.get_async_step_return_state(task.node)
+ deploy_opts = deploy_utils.build_agent_options(task.node)
+ task.driver.boot.prepare_ramdisk(task, deploy_opts)
+ manager_utils.node_power_action(task, states.REBOOT)
+
+ return self.post_create_configuration(
+ task, raid_configs, return_state=return_state)
+
+ @base.clean_step(priority=0)
+ @base.deploy_step(priority=0)
+ def delete_configuration(self, task):
+ """Delete RAID configuration on the node.
+
+ :param task: TaskManager object containing the node.
+ :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment)
+ if deletion is in progress asynchronously or None if it is
+ complete.
+ """
+ node = task.node
+ system = redfish_utils.get_system(node)
+ vols_to_delete = []
+ try:
+ for storage in system.storage.get_members():
+ controller = (storage.storage_controllers[0]
+ if storage.storage_controllers else None)
+ controller_name = None
+ if controller and controller.identifiers:
+ controller_name = controller.identifiers[0].durable_name
+ for volume in storage.volumes.get_members():
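+                    # Only volumes that report a RAID type, or whose volume
+                    # type is anything other than a plain raw device, are
+                    # candidates for deletion.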
+ if (volume.raid_type or volume.volume_type not in
+ [None, sushy.VOLUME_TYPE_RAW_DEVICE]):
+ vols_to_delete.append((storage.volumes, volume,
+ controller_name))
+ except sushy.exceptions.SushyError as exc:
+ error_msg = _('Cannot get the list of volumes to delete for node '
+ '%(node_uuid)s. Reason: %(error)s.' %
+ {'node_uuid': node.uuid, 'error': exc})
+ LOG.error(error_msg)
+ raise exception.RedfishError(error=exc)
+
+ self.pre_delete_configuration(task, vols_to_delete)
+
+ reboot_required = False
+ raid_configs = list()
+ for vol_coll, volume, controller_name in vols_to_delete:
+ raid_config = dict()
+ apply_time = None
+ apply_time_support = vol_coll.operation_apply_time_support
+ if (apply_time_support
+ and apply_time_support.mapped_supported_values):
+ supported_values = apply_time_support.mapped_supported_values
+ if sushy.APPLY_TIME_IMMEDIATE in supported_values:
+ apply_time = sushy.APPLY_TIME_IMMEDIATE
+ elif sushy.APPLY_TIME_ON_RESET in supported_values:
+ apply_time = sushy.APPLY_TIME_ON_RESET
+ response = volume.delete(apply_time=apply_time)
+ # only save the async tasks (task_monitors) in raid_config
+ if (response is not None
+ and hasattr(response, 'task_monitor_uri')):
+ raid_config['operation'] = 'delete'
+ raid_config['raid_controller'] = controller_name
+ raid_config['task_monitor_uri'] = response.task_monitor_uri
+ reboot_required = True
+ raid_configs.append(raid_config)
+
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['raid_configs'] = raid_configs
+ node.driver_internal_info = driver_internal_info
+
+ return_state = None
+ deploy_utils.set_async_step_flags(
+ node,
+ reboot=reboot_required,
+ skip_current_step=True,
+ polling=True)
+ if reboot_required:
+ return_state = deploy_utils.get_async_step_return_state(task.node)
+ deploy_opts = deploy_utils.build_agent_options(task.node)
+ task.driver.boot.prepare_ramdisk(task, deploy_opts)
+ manager_utils.node_power_action(task, states.REBOOT)
+
+ return self.post_delete_configuration(
+ task, raid_configs, return_state=return_state)
+
+ def volume_create_error_handler(self, task, exc, volume_collection,
+ payload):
+ """Handle error from failed VolumeCollection.create()
+
+ Extension point to allow vendor implementations to extend this class
+ and override this method to perform a custom action if the call to
+ VolumeCollection.create() fails.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param exc: the exception raised by VolumeCollection.create().
+ :param volume_collection: the sushy VolumeCollection instance.
+ :param payload: the payload passed to the failed create().
+ :returns: Newly created Volume resource or TaskMonitor if async task.
+ :raises: RedfishError if there is an error creating the virtual disk.
+ """
+ raise exc
+
+ def pre_create_configuration(self, task, logical_disks_to_create):
+ """Perform required actions before creating config.
+
+ Extension point to allow vendor implementations to extend this class
+ and override this method to perform custom actions prior to creating
+ the RAID configuration on the Redfish service.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param logical_disks_to_create: list of logical disks to create.
+ """
+ pass
+
+ def post_create_configuration(self, task, raid_configs, return_state=None):
+ """Perform post create_configuration action to commit the config.
+
+ Extension point to allow vendor implementations to extend this class
+ and override this method to perform a custom action to commit the
+ RAID create configuration to the Redfish service.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param raid_configs: a list of dictionaries containing the RAID
+ configuration operation details.
+ :param return_state: state to return based on operation being invoked
+ """
+ return return_state
+
+ def pre_delete_configuration(self, task, vols_to_delete):
+ """Perform required actions before deleting config.
+
+ Extension point to allow vendor implementations to extend this class
+ and override this method to perform custom actions prior to deleting
+ the RAID configuration on the Redfish service.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param vols_to_delete: list of volumes to delete.
+ """
+ pass
+
+ def post_delete_configuration(self, task, raid_configs, return_state=None):
+ """Perform post delete_configuration action to commit the config.
+
+ Extension point to allow vendor implementations to extend this class
+ and override this method to perform a custom action to commit the
+ RAID delete configuration to the Redfish service.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param raid_configs: a list of dictionaries containing the RAID
+ configuration operation details.
+ :param return_state: state to return based on operation being invoked
+ """
+ return return_state
+
+ def _clear_raid_configs(self, node):
+ """Clears RAID configurations from driver_internal_info
+
+ Note that the caller must have an exclusive lock on the node.
+
+ :param node: the node to clear the RAID configs from
+ """
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info.pop('raid_configs', None)
+ node.driver_internal_info = driver_internal_info
+ node.save()
+
+ @METRICS.timer('RedfishRAID._query_raid_config_failed')
+ @periodics.periodic(
+ spacing=CONF.redfish.raid_config_fail_interval,
+ enabled=CONF.redfish.raid_config_fail_interval > 0)
+ def _query_raid_config_failed(self, manager, context):
+ """Periodic job to check for failed RAID configuration."""
+
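+        # Nodes whose cleaning failed end up in CLEANFAIL and are placed in
+        # maintenance, so that is the population to check here.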
+ filters = {'reserved': False, 'provision_state': states.CLEANFAIL,
+ 'maintenance': True}
+
+ fields = ['driver_internal_info']
+
+ node_list = manager.iter_nodes(fields=fields, filters=filters)
+ for (node_uuid, driver, conductor_group,
+ driver_internal_info) in node_list:
+ try:
+ lock_purpose = 'checking async RAID config failed.'
+ with task_manager.acquire(context, node_uuid,
+ purpose=lock_purpose,
+ shared=True) as task:
+ if not isinstance(task.driver.raid, RedfishRAID):
+ continue
+
+ raid_configs = driver_internal_info.get(
+ 'raid_configs')
+ if not raid_configs:
+ continue
+
+ node = task.node
+
+ # A RAID config failed. Discard any remaining RAID
+ # configs so when the user takes the node out of
+ # maintenance mode, pending RAID configs do not
+ # automatically continue.
+ LOG.warning('RAID configuration failed for node %(node)s. '
+ 'Discarding remaining RAID configurations.',
+ {'node': node.uuid})
+
+ task.upgrade_lock()
+ self._clear_raid_configs(node)
+
+ except exception.NodeNotFound:
+ LOG.info('During _query_raid_config_failed, node '
+ '%(node)s was not found and presumed deleted by '
+ 'another process.', {'node': node_uuid})
+ except exception.NodeLocked:
+ LOG.info('During _query_raid_config_failed, node '
+ '%(node)s was already locked by another process. '
+ 'Skip.', {'node': node_uuid})
+
+ @METRICS.timer('RedfishRAID._query_raid_config_status')
+ @periodics.periodic(
+ spacing=CONF.redfish.raid_config_status_interval,
+ enabled=CONF.redfish.raid_config_status_interval > 0)
+ def _query_raid_config_status(self, manager, context):
+ """Periodic job to check RAID config tasks."""
+
+ filters = {'reserved': False, 'provision_state': states.CLEANWAIT}
+ fields = ['driver_internal_info']
+
+ node_list = manager.iter_nodes(fields=fields, filters=filters)
+ for (node_uuid, driver, conductor_group,
+ driver_internal_info) in node_list:
+ try:
+ lock_purpose = 'checking async RAID config tasks.'
+ with task_manager.acquire(context, node_uuid,
+ purpose=lock_purpose,
+ shared=True) as task:
+ if not isinstance(task.driver.raid, RedfishRAID):
+ continue
+
+ raid_configs = driver_internal_info.get(
+ 'raid_configs')
+ if not raid_configs:
+ continue
+
+ self._check_node_raid_config(task)
+
+ except exception.NodeNotFound:
+ LOG.info('During _query_raid_config_status, node '
+ '%(node)s was not found and presumed deleted by '
+ 'another process.', {'node': node_uuid})
+ except exception.NodeLocked:
+ LOG.info('During _query_raid_config_status, node '
+ '%(node)s was already locked by another process. '
+ 'Skip.', {'node': node_uuid})
+
+ def _get_error_messages(self, response):
+ try:
+ body = response.json()
+ except ValueError:
+ return []
+ else:
+ error = body.get('error', {})
+ code = error.get('code', '')
+ message = error.get('message', code)
+ ext_info = error.get('@Message.ExtendedInfo', [{}])
+ messages = [m.get('Message') for m in ext_info if 'Message' in m]
+ if not messages and message:
+ messages = [message]
+ return messages
+
+ def _raid_config_in_progress(self, task, raid_config):
+ """Check if this RAID configuration operation is still in progress."""
+ task_monitor_uri = raid_config['task_monitor_uri']
+ try:
+ task_monitor = redfish_utils.get_task_monitor(task.node,
+ task_monitor_uri)
+ except exception.RedfishError:
+ LOG.info('Unable to get status of RAID %(operation)s task to node '
+ '%(node_uuid)s; assuming task completed successfully',
+ {'operation': raid_config['operation'],
+ 'node_uuid': task.node.uuid})
+ return False
+ if task_monitor.is_processing:
+ LOG.debug('RAID %(operation)s task %(task_mon)s to node '
+ '%(node_uuid)s still in progress',
+ {'operation': raid_config['operation'],
+ 'task_mon': task_monitor.task_monitor_uri,
+ 'node_uuid': task.node.uuid})
+ return True
+ else:
+ response = task_monitor.response
+ if response is not None:
+ status_code = response.status_code
+ if status_code >= 400:
+ messages = self._get_error_messages(response)
+ LOG.error('RAID %(operation)s task to node '
+ '%(node_uuid)s failed with status '
+ '%(status_code)s; messages: %(messages)s',
+ {'operation': raid_config['operation'],
+ 'node_uuid': task.node.uuid,
+ 'status_code': status_code,
+ 'messages': ", ".join(messages)})
+ else:
+ LOG.info('RAID %(operation)s task to node '
+ '%(node_uuid)s completed with status '
+ '%(status_code)s',
+ {'operation': raid_config['operation'],
+ 'node_uuid': task.node.uuid,
+ 'status_code': status_code})
+ return False
+
+ @METRICS.timer('RedfishRAID._check_node_raid_config')
+ def _check_node_raid_config(self, task):
+ """Check the progress of running RAID config on a node."""
+ node = task.node
+ raid_configs = node.driver_internal_info['raid_configs']
+
+ task.upgrade_lock()
+ raid_configs[:] = [i for i in raid_configs
+ if self._raid_config_in_progress(task, i)]
+
+ if not raid_configs:
+ self._clear_raid_configs(node)
+ LOG.info('RAID configuration completed for node %(node)s',
+ {'node': node.uuid})
+ manager_utils.notify_conductor_resume_clean(task)
diff --git a/ironic/drivers/modules/redfish/utils.py b/ironic/drivers/modules/redfish/utils.py
index c1eaa8dc0..58c88ccce 100644
--- a/ironic/drivers/modules/redfish/utils.py
+++ b/ironic/drivers/modules/redfish/utils.py
@@ -285,6 +285,24 @@ def get_system(node):
raise exception.RedfishError(error=e)
+def get_task_monitor(node, uri):
+ """Get a TaskMonitor for a node.
+
+ :param node: an Ironic node object
+ :param uri: the URI of a TaskMonitor
+    :returns: A task monitor.
+    :raises: RedfishConnectionError when it fails to connect to Redfish
+ :raises: RedfishError when the TaskMonitor is not available in Redfish
+ """
+
+ try:
+ return _get_connection(node, lambda conn: conn.get_task_monitor(uri))
+ except sushy.exceptions.ResourceNotFoundError as e:
+ LOG.error('The Redfish TaskMonitor "%(uri)s" was not found for '
+ 'node %(node)s. Error %(error)s',
+ {'uri': uri, 'node': node.uuid, 'error': e})
+ raise exception.RedfishError(error=e)
+
+
def _get_connection(node, lambda_fun, *args):
"""Get a Redfish connection to a node.
diff --git a/ironic/drivers/redfish.py b/ironic/drivers/redfish.py
index 9a00b0497..d51e58b6f 100644
--- a/ironic/drivers/redfish.py
+++ b/ironic/drivers/redfish.py
@@ -14,6 +14,7 @@
# under the License.
from ironic.drivers import generic
+from ironic.drivers.modules import agent
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import noop
@@ -24,6 +25,7 @@ from ironic.drivers.modules.redfish import boot as redfish_boot
from ironic.drivers.modules.redfish import inspect as redfish_inspect
from ironic.drivers.modules.redfish import management as redfish_mgmt
from ironic.drivers.modules.redfish import power as redfish_power
+from ironic.drivers.modules.redfish import raid as redfish_raid
from ironic.drivers.modules.redfish import vendor as redfish_vendor
@@ -63,3 +65,8 @@ class RedfishHardware(generic.GenericHardware):
def supported_vendor_interfaces(self):
"""List of supported vendor interfaces."""
return [redfish_vendor.RedfishVendorPassthru, noop.NoVendor]
+
+ @property
+ def supported_raid_interfaces(self):
+ """List of supported raid interfaces."""
+ return [redfish_raid.RedfishRAID, noop.NoRAID, agent.AgentRAID]
diff --git a/ironic/objects/portgroup.py b/ironic/objects/portgroup.py
index 4c6a763a2..8628df731 100644
--- a/ironic/objects/portgroup.py
+++ b/ironic/objects/portgroup.py
@@ -152,17 +152,19 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
- def get_by_address(cls, context, address):
+ def get_by_address(cls, context, address, project=None):
"""Find portgroup by address and return a :class:`Portgroup` object.
:param cls: the :class:`Portgroup`
:param context: Security context
:param address: The MAC address of a portgroup.
+ :param project: a node owner or lessee to match against.
:returns: A :class:`Portgroup` object.
:raises: PortgroupNotFound
"""
- db_portgroup = cls.dbapi.get_portgroup_by_address(address)
+ db_portgroup = cls.dbapi.get_portgroup_by_address(address,
+ project=project)
portgroup = cls._from_db_object(context, cls(), db_portgroup)
return portgroup
@@ -191,7 +193,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of Portgroup objects.
:param cls: the :class:`Portgroup`
@@ -200,6 +202,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: Pagination marker for large data sets.
:param sort_key: Column to sort results by.
:param sort_dir: Direction to sort. "asc" or "desc".
+ :param project: a node owner or lessee to match against.
:returns: A list of :class:`Portgroup` object.
:raises: InvalidParameterValue
@@ -207,7 +210,8 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
db_portgroups = cls.dbapi.get_portgroup_list(limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_portgroups)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -216,7 +220,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of Portgroup objects associated with a given node ID.
:param cls: the :class:`Portgroup`
@@ -226,6 +230,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: Pagination marker for large data sets.
:param sort_key: Column to sort results by.
:param sort_dir: Direction to sort. "asc" or "desc".
+ :param project: a node owner or lessee to match against.
:returns: A list of :class:`Portgroup` object.
:raises: InvalidParameterValue
@@ -234,7 +239,8 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_portgroups)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/objects/volume_connector.py b/ironic/objects/volume_connector.py
index e91706d78..57070ab06 100644
--- a/ironic/objects/volume_connector.py
+++ b/ironic/objects/volume_connector.py
@@ -108,7 +108,7 @@ class VolumeConnector(base.IronicObject,
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of VolumeConnector objects.
:param context: security context
@@ -116,13 +116,15 @@ class VolumeConnector(base.IronicObject,
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
+ :param project: The associated node project to search with.
:returns: a list of :class:`VolumeConnector` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_connectors = cls.dbapi.get_volume_connector_list(limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_connectors)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -131,7 +133,7 @@ class VolumeConnector(base.IronicObject,
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of VolumeConnector objects related to a given node ID.
:param context: security context
@@ -140,6 +142,8 @@ class VolumeConnector(base.IronicObject,
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
+ :param project: The associated node project to search with.
:returns: a list of :class:`VolumeConnector` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
@@ -148,7 +152,8 @@ class VolumeConnector(base.IronicObject,
limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_connectors)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/objects/volume_target.py b/ironic/objects/volume_target.py
index 68a54c435..051d9ed9e 100644
--- a/ironic/objects/volume_target.py
+++ b/ironic/objects/volume_target.py
@@ -107,7 +107,7 @@ class VolumeTarget(base.IronicObject,
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of VolumeTarget objects.
:param context: security context
@@ -115,13 +115,16 @@ class VolumeTarget(base.IronicObject,
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
+ :param project: The associated node project to search with.
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_target_list(limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -130,7 +133,7 @@ class VolumeTarget(base.IronicObject,
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of VolumeTarget objects related to a given node ID.
:param context: security context
@@ -139,6 +142,8 @@ class VolumeTarget(base.IronicObject,
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
+ :param project: The associated node project to search with.
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
@@ -147,7 +152,8 @@ class VolumeTarget(base.IronicObject,
limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -156,7 +162,7 @@ class VolumeTarget(base.IronicObject,
# @object_base.remotable_classmethod
@classmethod
def list_by_volume_id(cls, context, volume_id, limit=None, marker=None,
- sort_key=None, sort_dir=None):
+ sort_key=None, sort_dir=None, project=None):
"""Return a list of VolumeTarget objects related to a given volume ID.
:param context: security context
@@ -174,7 +180,8 @@ class VolumeTarget(base.IronicObject,
limit=limit,
marker=marker,
sort_key=sort_key,
- sort_dir=sort_dir)
+ sort_dir=sort_dir,
+ project=project)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index 04289aa13..c983f6d86 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -2383,6 +2383,16 @@ class TestPatch(test_api_base.BaseApiTest):
self.node_no_name = obj_utils.create_test_node(
self.context, uuid='deadbeef-0000-1111-2222-333333333333',
chassis_id=self.chassis.id)
+ self.port = obj_utils.create_test_port(
+ self.context,
+ uuid='9bb50f13-0b8d-4ade-ad2d-d91fefdef9cc',
+ address='00:01:02:03:04:05',
+ node_id=self.node.id)
+ self.portgroup = obj_utils.create_test_portgroup(
+ self.context,
+ uuid='9bb50f13-0b8d-4ade-ad2d-d91fefdef9ff',
+ address='00:00:00:00:00:ff',
+ node_id=self.node.id)
p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for',
autospec=True)
self.mock_gtf = p.start()
@@ -2693,7 +2703,7 @@ class TestPatch(test_api_base.BaseApiTest):
def test_patch_portgroups_subresource(self):
response = self.patch_json(
- '/nodes/%s/portgroups/9bb50f13-0b8d-4ade-ad2d-d91fefdef9cc' %
+ '/nodes/%s/portgroups/9bb50f13-0b8d-4ade-ad2d-d91fefdef9ff' %
self.node.uuid,
[{'path': '/extra/foo', 'value': 'bar',
'op': 'add'}], expect_errors=True,
diff --git a/ironic/tests/unit/api/controllers/v1/test_utils.py b/ironic/tests/unit/api/controllers/v1/test_utils.py
index 67b0b55d5..5826af89b 100644
--- a/ironic/tests/unit/api/controllers/v1/test_utils.py
+++ b/ironic/tests/unit/api/controllers/v1/test_utils.py
@@ -891,14 +891,18 @@ class TestNodeIdent(base.TestCase):
self.assertRaises(exception.NodeNotFound,
utils.populate_node_uuid, port, d)
+ @mock.patch.object(utils, 'check_owner_policy', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
- def test_replace_node_uuid_with_id(self, mock_gbu, mock_pr):
+ def test_replace_node_uuid_with_id(self, mock_gbu, mock_check, mock_pr):
node = obj_utils.get_test_node(self.context, id=1)
mock_gbu.return_value = node
to_dict = {'node_uuid': self.valid_uuid}
self.assertEqual(node, utils.replace_node_uuid_with_id(to_dict))
self.assertEqual({'node_id': 1}, to_dict)
+ mock_check.assert_called_once_with('node', 'baremetal:node:get',
+ None, None,
+ conceal_node=mock.ANY)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
def test_replace_node_uuid_with_id_not_found(self, mock_gbu, mock_pr):
@@ -1136,10 +1140,13 @@ class TestCheckNodePolicyAndRetrieve(base.TestCase):
rpc_node = utils.check_node_policy_and_retrieve(
'fake_policy', self.valid_node_uuid
)
+ authorize_calls = [
+ mock.call('baremetal:node:get', expected_target, fake_context),
+ mock.call('fake_policy', expected_target, fake_context)]
+
mock_grn.assert_called_once_with(self.valid_node_uuid)
mock_grnws.assert_not_called()
- mock_authorize.assert_called_once_with(
- 'fake_policy', expected_target, fake_context)
+ mock_authorize.assert_has_calls(authorize_calls)
self.assertEqual(self.node, rpc_node)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@@ -1162,14 +1169,16 @@ class TestCheckNodePolicyAndRetrieve(base.TestCase):
)
mock_grn.assert_not_called()
mock_grnws.assert_called_once_with(self.valid_node_uuid)
- mock_authorize.assert_called_once_with(
- 'fake_policy', expected_target, fake_context)
+ authorize_calls = [
+ mock.call('baremetal:node:get', expected_target, fake_context),
+ mock.call('fake_policy', expected_target, fake_context)]
+ mock_authorize.assert_has_calls(authorize_calls)
self.assertEqual(self.node, rpc_node)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
- def test_check_node_policy_and_retrieve_no_node_policy_forbidden(
+ def test_check_node_policy_and_retrieve_no_node_policy_notfound(
self, mock_grn, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
@@ -1178,7 +1187,7 @@ class TestCheckNodePolicyAndRetrieve(base.TestCase):
node=self.valid_node_uuid)
self.assertRaises(
- exception.HTTPForbidden,
+ exception.NodeNotFound,
utils.check_node_policy_and_retrieve,
'fake-policy',
self.valid_node_uuid
@@ -1213,7 +1222,7 @@ class TestCheckNodePolicyAndRetrieve(base.TestCase):
mock_grn.return_value = self.node
self.assertRaises(
- exception.HTTPForbidden,
+ exception.NodeNotFound,
utils.check_node_policy_and_retrieve,
'fake-policy',
self.valid_node_uuid
@@ -1502,8 +1511,12 @@ class TestCheckPortPolicyAndRetrieve(base.TestCase):
mock_pgbu.assert_called_once_with(mock_pr.context,
self.valid_port_uuid)
mock_ngbi.assert_called_once_with(mock_pr.context, 42)
- mock_authorize.assert_called_once_with(
- 'fake_policy', expected_target, fake_context)
+ expected = [
+ mock.call('baremetal:node:get', expected_target, fake_context),
+ mock.call('fake_policy', expected_target, fake_context)]
+
+ mock_authorize.assert_has_calls(expected)
+
self.assertEqual(self.port, rpc_port)
self.assertEqual(self.node, rpc_node)
@@ -1519,7 +1532,7 @@ class TestCheckPortPolicyAndRetrieve(base.TestCase):
port=self.valid_port_uuid)
self.assertRaises(
- exception.HTTPForbidden,
+ exception.PortNotFound,
utils.check_port_policy_and_retrieve,
'fake-policy',
self.valid_port_uuid
@@ -1546,7 +1559,7 @@ class TestCheckPortPolicyAndRetrieve(base.TestCase):
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(objects.Port, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
- def test_check_port_policy_and_retrieve_policy_forbidden(
+ def test_check_port_policy_and_retrieve_policy_notfound(
self, mock_ngbi, mock_pgbu, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
@@ -1556,7 +1569,7 @@ class TestCheckPortPolicyAndRetrieve(base.TestCase):
mock_ngbi.return_value = self.node
self.assertRaises(
- exception.HTTPForbidden,
+ exception.PortNotFound,
utils.check_port_policy_and_retrieve,
'fake-policy',
self.valid_port_uuid
diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py
index fff0927ae..023c47c8c 100644
--- a/ironic/tests/unit/api/test_acl.py
+++ b/ironic/tests/unit/api/test_acl.py
@@ -142,17 +142,30 @@ class TestACLBase(base.BaseApiTest):
)
else:
assert False, 'Unimplemented test method: %s' % method
-
+        # Once migrated, items will return:
+        # 403 - Trying to access something that is generally denied.
+        #       Example: PATCH /v1/nodes/<uuid> as a reader.
+        # 404 - Trying to access something where we don't have permissions
+        #       in a project scope. This is particularly true where implied
+        #       permissions or associations exist, e.g. ports are reached
+        #       through a node that is inaccessible because neither the
+        #       owner nor the lessee matches.
+        #       Example: GET /v1/portgroups or /v1/nodes/<uuid>/ports
+        # 500 - Attempting to access something such as a system scoped
+        #       endpoint with a project scoped request.
+        #       Example: /v1/conductors.
if not (bool(deprecated)
- and ('403' in response.status or '500' in response.status)
+ and ('404' in response.status
+ or '500' in response.status
+ or '403' in response.status)
and cfg.CONF.oslo_policy.enforce_scope
and cfg.CONF.oslo_policy.enforce_new_defaults):
- # NOTE(TheJulia): Everything, once migrated, should
- # return a 403.
self.assertEqual(assert_status, response.status_int)
else:
self.assertTrue(
- '403' in response.status or '500' in response.status)
+ ('404' in response.status
+ or '500' in response.status
+ or '403' in response.status))
# We can't check the contents of the response if there is no
# response.
return
@@ -163,8 +176,23 @@ class TestACLBase(base.BaseApiTest):
if assert_dict_contains:
for k, v in assert_dict_contains.items():
self.assertIn(k, response)
- self.assertEqual(v.format(**self.format_data),
- response.json[k])
+ print(k)
+ print(v)
+ if str(v) == "None":
+                    # The JSON value comes back as null, i.e. None in
+                    # Python, so compare against None explicitly.
+ self.assertIsNone(response.json[k])
+ elif str(v) == "{}":
+                    # Special match signifying an empty dictionary.
+ self.assertEqual({}, response.json[k])
+ elif isinstance(v, dict):
+ # The value from the YAML can be a dictionary,
+ # which cannot be formatted, so we're likely doing
+ # direct matching.
+ self.assertEqual(str(v), str(response.json[k]))
+ else:
+ self.assertEqual(v.format(**self.format_data),
+ response.json[k])
if assert_list_length:
for root, length in assert_list_length.items():
@@ -173,7 +201,14 @@ class TestACLBase(base.BaseApiTest):
# important for owner/lessee testing.
items = response.json[root]
self.assertIsInstance(items, list)
- self.assertEqual(length, len(items))
+ if not (bool(deprecated)
+ and cfg.CONF.oslo_policy.enforce_scope):
+ self.assertEqual(length, len(items))
+ else:
+ # If we have scope enforcement, we likely have different
+ # views, such as "other" admins being subjected to
+ # a filtered view in these cases.
+ self.assertEqual(0, len(items))
# NOTE(TheJulia): API tests in Ironic tend to have a pattern
# to print request and response data to aid in development
@@ -207,18 +242,21 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
resource_class="CUSTOM_TEST")
fake_db_node = db_utils.create_test_node(
chassis_id=None,
- driver='fake-driverz')
+ driver='fake-driverz',
+ owner='z')
fake_db_node_alloced = db_utils.create_test_node(
id=allocated_node_id,
chassis_id=None,
allocation_id=fake_db_allocation['id'],
uuid='22e26c0b-03f2-4d2e-ae87-c02d7f33c000',
- driver='fake-driverz')
+ driver='fake-driverz',
+ owner='z')
fake_vif_port_id = "ee21d58f-5de2-4956-85ff-33935ea1ca00"
fake_db_port = db_utils.create_test_port(
node_id=fake_db_node['id'],
internal_info={'tenant_vif_port_id': fake_vif_port_id})
fake_db_portgroup = db_utils.create_test_portgroup(
+ uuid="6eb02b44-18a3-4659-8c0b-8d2802581ae4",
node_id=fake_db_node['id'])
fake_db_chassis = db_utils.create_test_chassis(
drivers=['fake-hardware', 'fake-driverz', 'fake-driver'])
@@ -242,7 +280,6 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
# false positives with test runners.
db_utils.create_test_node(
uuid='18a552fb-dcd2-43bf-9302-e4c93287be11')
-
self.format_data.update({
'node_ident': fake_db_node['uuid'],
'allocated_node_ident': fake_db_node_alloced['uuid'],
@@ -333,15 +370,42 @@ class TestRBACProjectScoped(TestACLBase):
owner_project_id = '70e5e25a-2ca2-4cb1-8ae8-7d8739cee205'
lessee_project_id = 'f11853c7-fa9c-4db3-a477-c9d8e0dbbf13'
unowned_node = db_utils.create_test_node(chassis_id=None)
+
# owned node - since the tests use the same node for
        # owner/lessee checks
- db_utils.create_test_node(
+ owned_node = db_utils.create_test_node(
uuid=owner_node_ident,
- owner=owner_node_ident)
+ owner=owner_project_id,
+ last_error='meow',
+ reservation='lolcats')
+ owned_node_port = db_utils.create_test_port(
+ uuid='ebe30f19-358d-41e1-8d28-fd7357a0164c',
+ node_id=owned_node['id'],
+ address='00:00:00:00:00:01')
+ db_utils.create_test_port(
+ uuid='21a3c5a7-1e14-44dc-a9dd-0c84d5477a57',
+ node_id=owned_node['id'],
+ address='00:00:00:00:00:02')
+ owner_pgroup = db_utils.create_test_portgroup(
+ uuid='b16efcf3-2990-41a1-bc1d-5e2c16f3d5fc',
+ node_id=owned_node['id'],
+ name='magicfoo',
+ address='01:03:09:ff:01:01')
+ db_utils.create_test_volume_target(
+ uuid='a265e2f0-e97f-4177-b1c0-8298add53086',
+ node_id=owned_node['id'])
+ db_utils.create_test_volume_connector(
+ uuid='65ea0296-219b-4635-b0c8-a6e055da878d',
+ node_id=owned_node['id'],
+ connector_id='iqn.2012-06.org.openstack.magic')
+
+ # Leased nodes
leased_node = db_utils.create_test_node(
uuid=lessee_node_ident,
owner=owner_project_id,
- lessee=lessee_project_id)
+ lessee=lessee_project_id,
+ last_error='meow',
+ reservation='lolcats')
fake_db_volume_target = db_utils.create_test_volume_target(
node_id=leased_node['id'])
fake_db_volume_connector = db_utils.create_test_volume_connector(
@@ -350,6 +414,21 @@ class TestRBACProjectScoped(TestACLBase):
node_id=leased_node['id'])
fake_db_portgroup = db_utils.create_test_portgroup(
node_id=leased_node['id'])
+ fake_trait = 'CUSTOM_MEOW'
+ fake_vif_port_id = "0e21d58f-5de2-4956-85ff-33935ea1ca01"
+
+ # Random objects that shouldn't be project visible
+ other_port = db_utils.create_test_port(
+ uuid='abfd8dbb-1732-449a-b760-2224035c6b99',
+ address='00:00:00:00:00:ff')
+
+ other_node = db_utils.create_test_node(
+ uuid='573208e5-cd41-4e26-8f06-ef44022b3793')
+ other_pgroup = db_utils.create_test_portgroup(
+ uuid='5810f41c-6585-41fc-b9c9-a94f50d421b5',
+ node_id=other_node['id'],
+ name='corgis_rule_the_world',
+ address='ff:ff:ff:ff:ff:0f')
self.format_data.update({
'node_ident': unowned_node['uuid'],
@@ -358,8 +437,17 @@ class TestRBACProjectScoped(TestACLBase):
'allocated_node_ident': lessee_node_ident,
'volume_target_ident': fake_db_volume_target['uuid'],
'volume_connector_ident': fake_db_volume_connector['uuid'],
- 'port_ident': fake_db_port['uuid'],
- 'portgroup_ident': fake_db_portgroup['uuid']})
+ 'lessee_port_ident': fake_db_port['uuid'],
+ 'lessee_portgroup_ident': fake_db_portgroup['uuid'],
+ 'trait': fake_trait,
+ 'vif_ident': fake_vif_port_id,
+ 'ind_component': 'component',
+ 'ind_ident': 'magic_light',
+ 'owner_port_ident': owned_node_port['uuid'],
+ 'other_port_ident': other_port['uuid'],
+ 'owner_portgroup_ident': owner_pgroup['uuid'],
+ 'other_portgroup_ident': other_pgroup['uuid'],
+ 'driver_name': 'fake-driverz'})
@ddt.file_data('test_rbac_project_scoped.yaml')
@ddt.unpack
diff --git a/ironic/tests/unit/api/test_acl_basic.yaml b/ironic/tests/unit/api/test_acl_basic.yaml
index 78e860180..1ec0ab07a 100644
--- a/ironic/tests/unit/api/test_acl_basic.yaml
+++ b/ironic/tests/unit/api/test_acl_basic.yaml
@@ -11,7 +11,7 @@ values:
unauthenticated_user_cannot_get_node:
path: &node_path '/v1/nodes/{node_uuid}'
- assert_status: 403
+ assert_status: 404
project_admin_can_get_node:
path: *node_path
@@ -24,7 +24,7 @@ project_admin_can_get_node:
project_member_cannot_get_node:
path: *node_path
headers: *project_member_headers
- assert_status: 403
+ assert_status: 404
public_api:
path: /
diff --git a/ironic/tests/unit/api/test_rbac_legacy.yaml b/ironic/tests/unit/api/test_rbac_legacy.yaml
index 5f4009b38..deda21757 100644
--- a/ironic/tests/unit/api/test_rbac_legacy.yaml
+++ b/ironic/tests/unit/api/test_rbac_legacy.yaml
@@ -78,7 +78,7 @@ nodes_get_node_member:
path: '/v1/nodes/{node_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_get_node_observer:
@@ -152,7 +152,7 @@ nodes_node_ident_get_member:
path: '/v1/nodes/{node_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_node_ident_get_observer:
@@ -178,7 +178,7 @@ nodes_node_ident_patch_member:
method: patch
headers: *member_headers
body: *extra_patch
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_node_ident_patch_observer:
@@ -200,7 +200,7 @@ nodes_node_ident_delete_member:
path: '/v1/nodes/{node_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_node_ident_delete_observer:
@@ -223,7 +223,7 @@ nodes_validate_get_member:
path: '/v1/nodes/{node_ident}/validate'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_validate_get_observer:
@@ -244,7 +244,7 @@ nodes_maintenance_put_member:
path: '/v1/nodes/{node_ident}/maintenance'
method: put
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_maintenance_put_observer:
@@ -265,7 +265,7 @@ nodes_maintenance_delete_member:
path: '/v1/nodes/{node_ident}/maintenance'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_maintenance_delete_observer:
@@ -289,7 +289,7 @@ nodes_management_boot_device_put_member:
method: put
headers: *member_headers
body: *boot_device_body
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_management_boot_device_put_observer:
@@ -311,7 +311,7 @@ nodes_management_boot_device_get_member:
path: '/v1/nodes/{node_ident}/management/boot_device'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_management_boot_device_get_observer:
@@ -332,7 +332,7 @@ nodes_management_boot_device_supported_get_member:
path: '/v1/nodes/{node_ident}/management/boot_device/supported'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_management_boot_device_supported_get_observer:
@@ -355,7 +355,7 @@ nodes_management_inject_nmi_put_member:
method: put
headers: *member_headers
body: {}
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_management_inject_nmi_put_observer:
@@ -377,7 +377,7 @@ nodes_states_get_member:
path: '/v1/nodes/{node_ident}/states'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_get_observer:
@@ -401,7 +401,7 @@ nodes_states_power_put_member:
method: put
headers: *member_headers
body: *power_body
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_power_put_observer:
@@ -426,7 +426,7 @@ nodes_states_provision_put_member:
method: put
headers: *member_headers
body: *provision_body
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_provision_put_observer:
@@ -455,7 +455,7 @@ nodes_states_raid_put_member:
method: put
headers: *member_headers
body: *raid_body
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_raid_put_observer:
@@ -477,7 +477,7 @@ nodes_states_console_get_member:
path: '/v1/nodes/{node_ident}/states/console'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_console_get_admin:
@@ -501,7 +501,7 @@ nodes_states_console_put_member:
method: put
headers: *member_headers
body: *console_body_put
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_states_console_put_observer:
@@ -526,7 +526,7 @@ nodes_vendor_passthru_methods_get_member:
path: '/v1/nodes/{node_ident}/vendor_passthru/methods'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vendor_passthru_methods_get_observer:
@@ -547,7 +547,7 @@ nodes_vendor_passthru_get_member:
path: '/v1/nodes/{node_ident}/vendor_passthru?method=test'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vendor_passthru_get_observer:
@@ -568,7 +568,7 @@ nodes_vendor_passthru_post_member:
path: '/v1/nodes/{node_ident}/vendor_passthru?method=test'
method: post
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vendor_passthru_post_observer:
@@ -589,7 +589,7 @@ nodes_vendor_passthru_put_member:
path: '/v1/nodes/{node_ident}/vendor_passthru?method=test'
method: put
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vendor_passthru_put_observer:
@@ -610,7 +610,7 @@ nodes_vendor_passthru_delete_member:
path: '/v1/nodes/{node_ident}/vendor_passthru?method=test'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vendor_passthru_delete_observer:
@@ -633,7 +633,7 @@ nodes_traits_get_member:
path: '/v1/nodes/{node_ident}/traits'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_traits_get_observer:
@@ -658,7 +658,7 @@ nodes_traits_put_member:
path: '/v1/nodes/{node_ident}/traits'
method: put
headers: *member_headers
- assert_status: 403
+ assert_status: 404
body: *traits_body
deprecated: true
@@ -681,7 +681,7 @@ nodes_traits_delete_member:
path: '/v1/nodes/{node_ident}/traits/{trait}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_traits_delete_observer:
@@ -702,7 +702,7 @@ nodes_traits_trait_put_member:
path: '/v1/nodes/{node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_traits_trait_put_observer:
@@ -723,7 +723,7 @@ nodes_traits_trait_delete_member:
path: '/v1/nodes/{node_ident}/traits/{trait}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_traits_trait_delete_observer:
@@ -750,7 +750,7 @@ nodes_vifs_get_member:
path: '/v1/nodes/{node_ident}/vifs'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vifs_get_observer:
@@ -773,7 +773,7 @@ nodes_vifs_post_member:
path: '/v1/nodes/{node_ident}/vifs'
method: post
headers: *member_headers
- assert_status: 403
+ assert_status: 404
body: *vif_body
deprecated: true
@@ -797,7 +797,7 @@ nodes_vifs_node_vif_ident_delete_member:
path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_vifs_node_vif_ident_delete_observer:
@@ -820,7 +820,7 @@ nodes_management_indicators_get_member:
path: '/v1/nodes/{node_ident}/management/indicators'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_management_indicators_get_observer:
@@ -927,7 +927,7 @@ portgroups_portgroup_ident_get_member:
path: '/v1/portgroups/{portgroup_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
portgroups_portgroup_ident_get_observer:
@@ -953,7 +953,7 @@ portgroups_portgroup_ident_patch_member:
method: patch
headers: *member_headers
body: *portgroup_patch_body
- assert_status: 403
+ assert_status: 404
deprecated: true
portgroups_portgroup_ident_patch_observer:
@@ -975,7 +975,7 @@ portgroups_portgroup_ident_delete_member:
path: '/v1/portgroups/{portgroup_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
portgroups_portgroup_ident_delete_observer:
@@ -998,7 +998,7 @@ nodes_portgroups_get_member:
path: '/v1/nodes/{node_ident}/portgroups'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_portgroups_get_observer:
@@ -1019,7 +1019,7 @@ nodes_portgroups_detail_get_member:
path: '/v1/nodes/{node_ident}/portgroups/detail'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_portgroups_detail_get_observer:
@@ -1113,7 +1113,7 @@ ports_port_id_get_member:
path: '/v1/ports/{port_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
ports_port_id_get_observer:
@@ -1138,7 +1138,7 @@ ports_port_id_patch_member:
path: '/v1/ports/{port_ident}'
method: patch
headers: *member_headers
- assert_status: 403
+ assert_status: 404
body: *port_patch_body
deprecated: true
@@ -1161,7 +1161,7 @@ ports_port_id_delete_member:
path: '/v1/ports/{port_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
ports_port_id_delete_observer:
@@ -1184,7 +1184,7 @@ nodes_ports_get_member:
path: '/v1/nodes/{node_ident}/ports'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_ports_get_observer:
@@ -1205,7 +1205,7 @@ nodes_ports_detail_get_member:
path: '/v1/nodes/{node_ident}/ports/detail'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_ports_detail_get_observer:
@@ -1228,7 +1228,7 @@ portgroups_ports_get_member:
path: '/v1/portgroups/{portgroup_ident}/ports'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
portgroups_ports_get_observer:
@@ -1249,7 +1249,7 @@ portgroups_ports_detail_get_member:
path: '/v1/portgroups/{portgroup_ident}/ports/detail'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
portgroups_ports_detail_get_observer:
@@ -1315,9 +1315,9 @@ volume_connectors_post_admin:
path: '/v1/volume/connectors'
method: post
headers: *admin_headers
- assert_status: 400
+ assert_status: 201
body: &volume_connector_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1be26c0b-03f2-4d2e-ae87-c02d7f33c123
type: ip
connector_id: 192.168.1.100
deprecated: true
@@ -1349,7 +1349,7 @@ volume_volume_connector_id_get_member:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_connector_id_get_observer:
@@ -1375,7 +1375,7 @@ volume_volume_connector_id_patch_member:
method: patch
headers: *member_headers
body: *connector_patch_body
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_connector_id_patch_observer:
@@ -1397,7 +1397,7 @@ volume_volume_connector_id_delete_member:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_connector_id_delete_observer:
@@ -1437,11 +1437,11 @@ volume_targets_post_admin:
path: '/v1/volume/targets'
method: post
headers: *admin_headers
- assert_status: 400
+ assert_status: 201
body: &volume_target_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1be26c0b-03f2-4d2e-ae87-c02d7f33c123
volume_type: iscsi
- boot_index: 0
+ boot_index: 4
volume_id: 'test-id'
deprecated: true
@@ -1472,7 +1472,7 @@ volume_volume_target_id_get_member:
path: '/v1/volume/targets/{volume_target_ident}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_target_id_get_observer:
@@ -1493,12 +1493,12 @@ volume_volume_target_id_patch_admin:
assert_status: 503
deprecated: true
-volume_volume_target_id_patch_admin:
+volume_volume_target_id_patch_member:
path: '/v1/volume/targets/{volume_target_ident}'
method: patch
body: *volume_target_patch
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_target_id_patch_observer:
@@ -1520,7 +1520,7 @@ volume_volume_target_id_delete_member:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
volume_volume_target_id_delete_observer:
@@ -1564,7 +1564,7 @@ nodes_volume_connectors_get_member:
path: '/v1/nodes/{node_ident}/volume/connectors'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_volume_connectors_get_observer:
@@ -1585,7 +1585,7 @@ nodes_volume_targets_get_member:
path: '/v1/nodes/{node_ident}/volume/targets'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_volume_targets_get_observer:
@@ -1804,7 +1804,7 @@ nodes_bios_get_member:
path: '/v1/nodes/{node_ident}/bios'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_bios_get_observer:
@@ -1825,7 +1825,7 @@ nodes_bios_bios_setting_get_member:
path: '/v1/nodes/{node_ident}/bios/{bios_setting}'
method: get
headers: *member_headers
- assert_status: 403
+ assert_status: 404
deprecated: true
nodes_bios_bios_setting_get_observer:
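The scenario entries in these RBAC YAML files share a small, uniform schema: a path, an
HTTP method, a headers anchor, an optional request body, the expected status code, and
optional list-length or dict-content assertions, plus deprecated/skip_reason flags. Below
is a minimal sketch of how such a file could be replayed against an HTTP test client; the
loader and the client interface shown here are illustrative assumptions, not the project's
actual test fixtures.

    import yaml

    def run_scenarios(yaml_path, client):
        """Replay RBAC scenarios from a YAML file (illustrative sketch only)."""
        with open(yaml_path) as fp:
            data = yaml.safe_load(fp)  # YAML anchors/aliases are resolved here
        for name, scenario in data.items():
            if name == 'values' or not isinstance(scenario, dict):
                continue  # shared anchor/values block, not a scenario
            if scenario.get('skip_reason'):
                continue  # scenario explicitly marked as skipped
            # NOTE: path placeholders such as {node_ident} would need to be
            # substituted with fixture UUIDs before the request is issued.
            response = client.request(
                scenario['method'].upper(),
                scenario['path'],
                headers=scenario.get('headers') or {},
                json=scenario.get('body'),
            )
            assert response.status_code == scenario['assert_status'], name
            for field, length in (scenario.get('assert_list_length') or {}).items():
                assert len(response.json()[field]) == length, (name, field)
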
diff --git a/ironic/tests/unit/api/test_rbac_project_scoped.yaml b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
index b768e975a..f3cc91941 100644
--- a/ironic/tests/unit/api/test_rbac_project_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
@@ -63,13 +63,13 @@ values:
X-Project-Id: f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
X-Roles: reader
third_party_admin_headers: &third_party_admin_headers
- X-Auth-Token: 'other-admin-token'
+ X-Auth-Token: 'third-party-admin-token'
X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
X-Roles: admin,member,reader
owner_project_id: &owner_project_id 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
lessee_project_id: &lessee_project_id f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
owned_node_ident: &owned_node_ident f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
- lessee_node_ident: &lessee_node_ident <TBD>
+ lessee_node_ident: &lessee_node_ident 38d5abed-c585-4fce-a57e-a2ffc2a2ec6f
# Nodes - https://docs.openstack.org/api-ref/baremetal/?expanded=#nodes-nodes
@@ -81,24 +81,21 @@ owner_admin_cannot_post_nodes:
body: &node_post_body
name: node
driver: fake-driverz
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
lessee_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *lessee_admin_headers
body: *node_post_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
third_party_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *third_party_admin_headers
body: *node_post_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
# Based on nodes_post_member
owner_member_cannot_post_nodes:
@@ -106,8 +103,7 @@ owner_member_cannot_post_nodes:
method: post
headers: *owner_member_headers
body: *node_post_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
# Based on nodes_post_reader
owner_reader_cannot_post_reader:
@@ -115,8 +111,7 @@ owner_reader_cannot_post_reader:
method: post
headers: *owner_reader_headers
body: *node_post_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
# Based on nodes_get_admin
# TODO: Create 3 nodes, 2 owned, 1 leased where it is also owned.
@@ -124,9 +119,9 @@ owner_admin_can_get_node:
path: '/v1/nodes'
method: get
headers: *owner_admin_headers
- assert_list_length: 2
+ assert_list_length:
+ nodes: 2
assert_status: 200
- skip_reason: policy not implemented
owner_member_can_get_node:
path: '/v1/nodes'
@@ -135,16 +130,14 @@ owner_member_can_get_node:
assert_list_length:
nodes: 2
assert_status: 200
- skip_reason: policy not implemented
owner_reader_can_get_node:
path: '/v1/nodes'
method: get
- headers: *owner_admin_headers
+ headers: *owner_reader_headers
assert_list_length:
nodes: 2
assert_status: 200
- skip_reason: policy not implemented
lessee_admin_can_get_node:
path: '/v1/nodes'
@@ -153,7 +146,6 @@ lessee_admin_can_get_node:
assert_list_length:
nodes: 1
assert_status: 200
- skip_reason: policy not implemented
lessee_member_can_get_node:
path: '/v1/nodes'
@@ -162,16 +154,14 @@ lessee_member_can_get_node:
assert_list_length:
nodes: 1
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_node:
path: '/v1/nodes'
method: get
headers: *lessee_reader_headers
- assert_list_length:
+ assert_list_length:
nodes: 1
assert_status: 200
- skip_reason: policy not implemented
# Tests that no nodes are associated and thus the API
# should return an empty list.
@@ -182,59 +172,102 @@ third_party_admin_cannot_get_node:
assert_list_length:
nodes: 0
assert_status: 200
- skip_reason: policy not implemented
# Based on nodes_get_node_admin
-owner_reader_cant_get_other_node:
+owner_reader_can_get_their_node:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: get
+ headers: *owner_reader_headers
+ assert_status: 200
+
+owner_reader_cannot_get_other_node:
+  # A node that does not belong to the owner,
+  # so they cannot see it.
path: '/v1/nodes/{node_ident}'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
+
+lessee_reader_can_get_their_node:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: get
+ headers: *lessee_reader_headers
+ assert_status: 200
lessee_reader_cant_get_other_node:
+  # A node that exists but does not belong
+  # to the lessee, so they cannot see it.
path: '/v1/nodes/{node_ident}'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
third_party_admin_cant_get_node:
path: '/v1/nodes/{node_ident}'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
+
+# Node body filter thresholds before detailed listing
+# Represents checks for baremetal:node:get:filter_threshold
+# which means anyone who is NOT a SYSTEM_READER by default
+# will have additional checks applied to determine whether they can view these fields.
+
+owner_reader_can_get_restricted_fields:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: get
+ headers: *owner_reader_headers
+ assert_status: 200
+ assert_dict_contains:
+ last_error: 'meow'
+ reservation: 'lolcats'
+ driver_internal_info:
+ private_state: "secret value"
+ driver_info:
+ foo: "bar"
+ fake_password: "******"
+
+lessee_reader_cannot_get_restricted_fields:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: get
+ headers: *lessee_reader_headers
+ assert_status: 200
+ assert_dict_contains:
+ last_error: "** Value Redacted - Requires baremetal:node:get:last_error permission. **"
+ reservation: "** Redacted - requires baremetal:node:get:reservation permission. **"
+ driver_internal_info:
+ content: '** Redacted - Requires baremetal:node:get:driver_internal_info permission. **'
+ driver_info:
+ content: '** Redacted - requires baremetal:node:get:driver_info permission. **'
owner_reader_can_get_detail:
- path: '/v1/nodes/details'
+ path: '/v1/nodes/detail'
method: get
headers: *owner_reader_headers
assert_list_length:
nodes: 2
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_detail:
- path: '/v1/nodes/details'
+ path: '/v1/nodes/detail'
method: get
headers: *lessee_reader_headers
assert_list_length:
nodes: 1
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_detail:
- path: '/v1/nodesi/details'
+ path: '/v1/nodes/detail'
method: get
headers: *third_party_admin_headers
assert_list_length:
nodes: 0
assert_status: 200
- skip_reason: policy not implemented
-owner_admin_can_patch_node:
+# Node /extra is baremetal:node:update_extra
+
+owner_admin_can_patch_node_extra:
path: '/v1/nodes/{owner_node_ident}'
method: patch
headers: *owner_admin_headers
@@ -243,76 +276,293 @@ owner_admin_can_patch_node:
path: /extra
value: {'test': 'testing'}
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_patch_node:
+owner_member_can_patch_node_extra:
path: '/v1/nodes/{owner_node_ident}'
method: patch
headers: *owner_member_headers
body: *extra_patch
assert_status: 503
- skip_reason: policy not implemented
-owner_reader_cannot_patch_node:
+owner_reader_cannot_patch_node_extra:
path: '/v1/nodes/{owner_node_ident}'
method: patch
headers: *owner_reader_headers
body: *extra_patch
assert_status: 403
- skip_reason: policy not implemented
-lessee_admin_can_patch_node:
+lessee_admin_can_patch_node_extra:
path: '/v1/nodes/{lessee_node_ident}'
method: patch
headers: *lessee_admin_headers
body: *extra_patch
assert_status: 503
- skip_reason: policy not implemented
-lessee_member_can_patch_node:
+lessee_member_can_patch_node_extra:
path: '/v1/nodes/{lessee_node_ident}'
method: patch
headers: *lessee_member_headers
body: *extra_patch
assert_status: 503
- skip_reason: policy not implemented
-lessee_reader_cannot_patch_node:
+lessee_reader_cannot_patch_node_extra:
path: '/v1/nodes/{lessee_node_ident}'
method: patch
headers: *lessee_reader_headers
body: *extra_patch
assert_status: 403
- skip_reason: policy not implemented
-third_party_admin_cannot_patch_node:
+third_party_admin_cannot_patch_node_extra:
path: '/v1/nodes/{owner_node_ident}'
method: patch
headers: *third_party_admin_headers
body: *extra_patch
assert_status: 404
- skip_reason: policy not implemented
+
+owner_admin_can_change_drivers:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ body:
+ - op: replace
+ path: /driver
+ value: fake-hardware
+ - op: replace
+ path: /power_interface
+ value: fake
+ assert_status: 503
+
+owner_member_can_patch_all_the_things:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_member_headers
+ body: &patch_all_the_things
+ - op: replace
+ path: /instance_info
+ value: {'test': 'testing'}
+ - op: replace
+ path: /driver_info
+ value: {'test': 'testing'}
+ - op: replace
+ path: /properties
+ value: {'test': 'testing'}
+ - op: replace
+ path: /network_data
+ value:
+ links: []
+ networks: []
+ services: []
+ - op: replace
+ path: /name
+ value: 'meow-node-1'
+ - op: replace
+ path: /retired
+ value: true
+ - op: replace
+ path: /retired_reason
+ value: "43"
+ assert_status: 503
+
+owner_member_can_change_lessee:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ assert_status: 503
+ body:
+ - op: replace
+ path: /lessee
+ value: "198566a5-a609-4463-9800-e8920be7c2fa"
+
+lessee_admin_cannot_change_lessee:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_admin_headers
+ assert_status: 403
+ body:
+ - op: replace
+ path: /lessee
+ value: "1234"
+
+lessee_admin_cannot_change_owner:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_admin_headers
+ body:
+ - op: replace
+ path: /owner
+ value: "1234"
+ assert_status: 403
+
+owner_admin_can_change_lessee:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ body:
+ - op: replace
+ path: /lessee
+ value: "1234"
+ assert_status: 503
+
+owner_admin_cannot_change_owner:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ body:
+ - op: replace
+ path: /owner
+ value: "1234"
+ assert_status: 403
+
+# This is not an explicitly restricted item; it falls
+# back to the generalized update capability, which oddly makes
+# a lot of sense in this case. It is a flag to prevent
+# accidental erasure/removal of the node.
+
+lessee_member_can_set_protected:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /protected
+ value: true
+ assert_status: 503
+
+lessee_member_cannot_patch_instance_info:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /instance_info
+ value: {'test': 'testing'}
+ assert_status: 403
+
+lessee_member_cannot_patch_driver_info:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /driver_info
+ value: {'test': 'testing'}
+ assert_status: 403
+
+lessee_member_cannot_patch_properties:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /properties
+ value: {'test': 'testing'}
+ assert_status: 403
+
+lessee_member_cannot_patch_network_data:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /network_data
+ value:
+ links: []
+ networks: []
+ services: []
+ assert_status: 403
+
+lessee_member_cannot_patch_name:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /name
+ value: 'meow-node-1'
+ assert_status: 403
+
+lessee_member_cannot_patch_retired:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body:
+ - op: replace
+ path: /retired
+ value: true
+ - op: replace
+ path: /retired_reason
+ value: "43"
+ assert_status: 403
+
+owner_admin_can_patch_node_instance_info:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ body: &instance_info_patch
+ - op: replace
+ path: /instance_info
+ value: {'test': 'testing'}
+ assert_status: 503
+
+owner_member_can_patch_node_instance_info:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_member_headers
+ body: *instance_info_patch
+ assert_status: 503
+
+owner_reader_can_patch_node_instance_info:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_reader_headers
+ body: *instance_info_patch
+ assert_status: 403
+
+lessee_admin_can_patch_node_instance_info:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_admin_headers
+ body: *instance_info_patch
+ assert_status: 503
+
+lessee_member_cannot_patch_node_instance_info:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_member_headers
+ body: *instance_info_patch
+ assert_status: 403
+
+lessee_reader_can_patch_node_instance_info:
+ path: '/v1/nodes/{lessee_node_ident}'
+ method: patch
+ headers: *lessee_reader_headers
+ body: *instance_info_patch
+ assert_status: 403
+
+third_party_admin_cannot_patch_node_instance_info:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *third_party_admin_headers
+ body: *instance_info_patch
+ assert_status: 404
owner_admin_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
method: delete
headers: *owner_admin_headers
- assert_status: 204
- skip_reason: policy not implemented
+ assert_status: 403
lessee_admin_cannot_delete_nodes:
- path: '/v1/nodes/{owner_node_ident}'
+ path: '/v1/nodes/{lessee_node_ident}'
method: delete
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
method: delete
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
# TODO(TheJulia): Specific field restrictions based on permissions
# are in the spec, but still need to be implemented test-wise.
@@ -328,42 +578,36 @@ owner_admin_can_validate_node:
method: get
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_can_validate_node:
path: '/v1/nodes/{lessee_node_ident}/validate'
method: get
headers: *lessee_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_validate_node:
path: '/v1/nodes/{owner_node_ident}/validate'
method: get
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_validate_node:
path: '/v1/nodes/{lessee_node_ident}/validate'
method: get
- headers: *lessee_admin_headers
+ headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_validate_node:
path: '/v1/nodes/{owner_node_ident}/validate'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
owner_admin_can_set_maintenance:
path: '/v1/nodes/{owner_node_ident}/maintenance'
method: put
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
# should we really allow this? they could desync with nova if they can do this...
lessee_admin_can_set_maintenance:
@@ -371,63 +615,54 @@ lessee_admin_can_set_maintenance:
method: put
headers: *lessee_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_set_maintenance:
path: '/v1/nodes/{owner_node_ident}/maintenance'
method: put
headers: *owner_member_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_set_maintenance:
path: '/v1/nodes/{lessee_node_ident}/maintenance'
method: put
headers: *lessee_member_headers
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
third_party_admin_cannot_set_maintenance:
path: '/v1/nodes/{owner_node_ident}/maintenance'
method: put
headers: *third_party_admin_headers
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_unset_maintenance:
path: '/v1/nodes/{owner_node_ident}/maintenance'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_can_unset_maintenance:
path: '/v1/nodes/{lessee_node_ident}/maintenance'
method: delete
headers: *lessee_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_unset_maintnenance:
path: '/v1/nodes/{owner_node_ident}/maintenance'
method: delete
headers: *owner_member_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_unset_maintenance:
path: '/v1/nodes/{lessee_node_ident}/maintenance'
method: delete
headers: *lessee_member_headers
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
third_party_admin_cannot_unset_maintenance:
path: '/v1/nodes/{node_ident}/maintenance'
method: delete
headers: *third_party_admin_headers
- assert_status: 404 # 404 may not be the right code here...
- skip_reason: policy not implemented
+ assert_status: 404
# Get/set supported boot devices
@@ -438,31 +673,27 @@ owner_admin_can_set_boot_device:
body: &boot_device_body
boot_device: pxe
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_set_boot_device:
- path: '/v1/nodes/{owner_node_ident}/management/boot_device'
+ path: '/v1/nodes/{lessee_node_ident}/management/boot_device'
method: put
headers: *lessee_admin_headers
body: *boot_device_body
assert_status: 403
- skip_reason: policy not implemented
-owner_member_can_set_boot_device:
+owner_member_cannot_set_boot_device:
path: '/v1/nodes/{owner_node_ident}/management/boot_device'
method: put
headers: *owner_member_headers
body: *boot_device_body
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
lessee_member_cannot_set_boot_device:
- path: '/v1/nodes/{owner_node_ident}/management/boot_device'
+ path: '/v1/nodes/{lessee_node_ident}/management/boot_device'
method: put
headers: *lessee_member_headers
body: *boot_device_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_set_boot_device:
path: '/v1/nodes/{owner_node_ident}/management/boot_device'
@@ -470,110 +701,95 @@ third_party_admin_cannot_set_boot_device:
headers: *third_party_admin_headers
body: *boot_device_body
assert_status: 404
- skip_reason: policy not implemented
owner_admin_can_get_boot_device:
path: '/v1/nodes/{owner_node_ident}/management/boot_device'
method: get
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_get_boot_device:
path: '/v1/nodes/{lessee_node_ident}/management/boot_device'
method: get
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
-owner_member_can_get_boot_device:
+owner_member_cannot_get_boot_device:
path: '/v1/nodes/{owner_node_ident}/management/boot_device'
method: get
headers: *owner_member_headers
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
lessee_member_cannot_get_boot_device:
path: '/v1/nodes/{lessee_node_ident}/management/boot_device'
method: get
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_get_boot_device:
path: '/v1/nodes/{owner_node_ident}/management/boot_device'
method: get
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_get_boot_device:
path: '/v1/nodes/{lessee_node_ident}/management/boot_device'
method: get
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_get_boot_device:
path: '/v1/nodes/{node_ident}/management/boot_device'
method: get
- headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ headers: *third_party_admin_headers
+ assert_status: 404
owner_admin_can_get_supported_boot_devices:
path: '/v1/nodes/{owner_node_ident}/management/boot_device/supported'
method: get
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_get_supported_boot_devices:
+owner_member_cannot_get_supported_boot_devices:
path: '/v1/nodes/{owner_node_ident}/management/boot_device/supported'
method: get
- headers: *owner_reader_headers
- assert_status: 503
- skip_reason: policy not implemented
+ headers: *owner_member_headers
+ assert_status: 403
lessee_admin_cannot_get_supported_boot_devices:
path: '/v1/nodes/{lessee_node_ident}/management/boot_device/supported'
method: get
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_get_supported_boot_devices:
path: '/v1/nodes/{owner_node_ident}/management/boot_device/supported'
method: get
- headers: *owner_reader_headers
+ headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
# Non-maskable interrupt (NMI)
owner_admin_can_send_non_masking_interrupt:
- path: '/v1/nodes/{node_ident}/management/inject_nmi'
+ path: '/v1/nodes/{owner_node_ident}/management/inject_nmi'
method: put
- headers: *third_party_admin_headers
+ headers: *owner_admin_headers
body: {}
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_send_non_masking_interrupt:
- path: '/v1/nodes/{node_ident}/management/inject_nmi'
+ path: '/v1/nodes/{lessee_node_ident}/management/inject_nmi'
method: put
- headers: *third_party_admin_headers
+ headers: *lessee_admin_headers
body: {}
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_send_non_masking_interrupt:
path: '/v1/nodes/{node_ident}/management/inject_nmi'
method: put
headers: *third_party_admin_headers
body: {}
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# States
@@ -582,21 +798,18 @@ owner_reader_get_states:
method: get
headers: *owner_admin_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_get_states:
path: '/v1/nodes/{lessee_node_ident}/states'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_part_admin_cannot_get_states:
path: '/v1/nodes/{node_ident}/states'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
# Power states
@@ -607,7 +820,6 @@ owner_admin_can_put_power_state_change:
body: &power_body
target: "power on"
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_can_put_power_state_change:
path: '/v1/nodes/{lessee_node_ident}/states/power'
@@ -615,7 +827,6 @@ lessee_admin_can_put_power_state_change:
headers: *lessee_admin_headers
body: *power_body
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_put_power_state_change:
path: '/v1/nodes/{owner_node_ident}/states/power'
@@ -623,7 +834,6 @@ owner_member_can_put_power_state_change:
headers: *owner_member_headers
body: *power_body
assert_status: 503
- skip_reason: policy not implemented
lessee_member_can_put_power_state_change:
path: '/v1/nodes/{lessee_node_ident}/states/power'
@@ -631,7 +841,6 @@ lessee_member_can_put_power_state_change:
headers: *lessee_member_headers
body: *power_body
assert_status: 503
- skip_reason: policy not implemented
owner_reader_cannot_put_power_state_change:
path: '/v1/nodes/{owner_node_ident}/states/power'
@@ -639,7 +848,6 @@ owner_reader_cannot_put_power_state_change:
headers: *owner_reader_headers
body: *power_body
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_put_power_state_change:
path: '/v1/nodes/{lessee_node_ident}/states/power'
@@ -647,7 +855,6 @@ lessee_reader_cannot_put_power_state_change:
headers: *lessee_reader_headers
body: *power_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_put_power_state_change:
path: '/v1/nodes/{node_ident}/states/power'
@@ -655,7 +862,6 @@ third_party_admin_cannot_put_power_state_change:
headers: *third_party_admin_headers
body: *power_body
assert_status: 404
- skip_reason: policy not implemented
# Provision states
@@ -666,7 +872,6 @@ owner_admin_can_change_provision_state:
body: &provision_body
target: deploy
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_change_provision_state:
path: '/v1/nodes/{owner_node_ident}/states/provision'
@@ -674,7 +879,6 @@ owner_member_can_change_provision_state:
headers: *owner_member_headers
body: *provision_body
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_can_change_provision_state:
path: '/v1/nodes/{lessee_node_ident}/states/provision'
@@ -682,7 +886,6 @@ lessee_admin_can_change_provision_state:
headers: *lessee_admin_headers
body: *provision_body
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_change_provision_state:
path: '/v1/nodes/{lessee_node_ident}/states/provision'
@@ -690,7 +893,6 @@ lessee_member_cannot_change_provision_state:
headers: *lessee_member_headers
body: *provision_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_change_provision_state:
path: '/v1/nodes/{owner_node_ident}/states/provision'
@@ -698,7 +900,6 @@ third_party_admin_cannot_change_provision_state:
headers: *lessee_member_headers
body: *provision_body
assert_status: 404
- skip_reason: policy not implemented
# Raid configuration
@@ -713,7 +914,6 @@ owner_admin_can_set_raid_config:
is_root_volume: true
raid_level: 1
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
@@ -721,15 +921,13 @@ lessee_admin_cannot_set_raid_config:
headers: *lessee_admin_headers
body: *raid_body
assert_status: 403
- skip_reason: policy not implemented
owner_member_can_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
method: put
headers: *owner_member_headers
body: *raid_body
- assert_status: 504
- skip_reason: policy not implemented
+ assert_status: 503
lessee_member_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
@@ -737,15 +935,13 @@ lessee_member_cannot_set_raid_config:
headers: *lessee_admin_headers
body: *raid_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
method: put
headers: *third_party_admin_headers
body: *raid_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Console
@@ -754,82 +950,71 @@ owner_admin_can_get_console:
method: get
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_can_get_console:
path: '/v1/nodes/{owner_node_ident}/states/console'
method: get
headers: *owner_member_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_get_console:
path: '/v1/nodes/{owner_node_ident}/states/console'
method: get
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_set_console:
- path: '/v1/nodes/{node_ident}/states/console'
+ path: '/v1/nodes/{owner_node_ident}/states/console'
method: put
headers: *owner_admin_headers
body: &console_body_put
enabled: true
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_set_console:
- path: '/v1/nodes/{node_ident}/states/console'
+ path: '/v1/nodes/{lessee_node_ident}/states/console'
method: put
headers: *lessee_admin_headers
body: *console_body_put
assert_status: 403
- skip_reason: policy not implemented
owner_member_can_set_console:
- path: '/v1/nodes/{node_ident}/states/console'
+ path: '/v1/nodes/{owner_node_ident}/states/console'
method: put
headers: *owner_member_headers
body: *console_body_put
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_set_console:
- path: '/v1/nodes/{node_ident}/states/console'
+ path: '/v1/nodes/{lessee_node_ident}/states/console'
method: put
headers: *lessee_member_headers
body: *console_body_put
assert_status: 403
- skip_reason: policy not implemented
# Vendor Passthru - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
@@ -842,42 +1027,36 @@ owner_admin_cannot_get_vendor_passthru_methods:
method: get
headers: *owner_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
method: get
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
method: get
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru/methods'
method: get
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru/methods'
method: get
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru/methods'
method: get
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
# Get vendor passthru method tests
owner_admin_cannot_get_vendor_passthru:
@@ -885,42 +1064,36 @@ owner_admin_cannot_get_vendor_passthru:
method: get
headers: *owner_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_cannot_get_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: get
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_get_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: get
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_get_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: get
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_get_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: get
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_get_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: get
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
# Post vendor passthru method tests
@@ -929,42 +1102,36 @@ owner_admin_cannot_post_vendor_passthru:
method: post
headers: *owner_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_cannot_post_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: post
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_post_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: post
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_post_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: post
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_post_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: post
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_post_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: post
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
# Put vendor passthru method tests
@@ -973,42 +1140,36 @@ owner_admin_cannot_put_vendor_passthru:
method: put
headers: *owner_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_cannot_put_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: put
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_put_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: put
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_put_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: put
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_put_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: put
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_put_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: put
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
# Delete vendor passthru methods tests
@@ -1017,42 +1178,36 @@ owner_admin_cannot_delete_vendor_passthru:
method: delete
headers: *owner_admin_headers
assert_status: 403
- skip_reason: policy not implemented
owner_member_cannot_delete_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: delete
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
owner_reader_cannot_delete_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: delete
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_delete_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: delete
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_delete_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: delete
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_reader_cannot_delete_vendor_passthru:
path: '/v1/nodes/{lessee_node_ident}/vendor_passthru?method=test'
method: delete
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
# Node Traits - https://docs.openstack.org/api-ref/baremetal/#node-traits-nodes
@@ -1061,21 +1216,18 @@ owner_reader_get_traits:
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_get_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
owner_admin_can_put_traits:
path: '/v1/nodes/{owner_node_ident}/traits'
@@ -1086,15 +1238,13 @@ owner_admin_can_put_traits:
traits:
- CUSTOM_TRAIT1
- HW_CPU_X86_VMX
- skip_reason: policy not implemented
-owner_member_can_put_traits:
+owner_member_cannot_put_traits:
path: '/v1/nodes/{owner_node_ident}/traits'
method: put
headers: *owner_member_headers
- assert_status: 503
+ assert_status: 403
body: *traits_body
- skip_reason: policy not implemented
lessee_admin_cannot_put_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
@@ -1102,7 +1252,6 @@ lessee_admin_cannot_put_traits:
headers: *owner_member_headers
assert_status: 403
body: *traits_body
- skip_reason: policy not implemented
lessee_member_cannot_put_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
@@ -1110,85 +1259,73 @@ lessee_member_cannot_put_traits:
headers: *lessee_member_headers
assert_status: 403
body: *traits_body
- skip_reason: policy not implemented
third_party_admin_cannot_put_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: put
headers: *third_party_admin_headers
- assert_status: 403
+ assert_status: 404
body: *traits_body
- skip_reason: policy not implemented
owner_admin_can_delete_traits:
path: '/v1/nodes/{owner_node_ident}/traits/{trait}'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_delete_traits:
+owner_member_cannot_delete_traits:
path: '/v1/nodes/{owner_node_ident}/traits/{trait}'
method: delete
- headers: *owner_admin_headers
- assert_status: 503
- skip_reason: policy not implemented
+ headers: *owner_member_headers
+ assert_status: 403
lessee_admin_cannot_delete_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/{trait}'
method: delete
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_delete_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/{trait}'
method: delete
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/{trait}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_put_custom_traits:
path: '/v1/nodes/{owner_node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_put_custom_traits:
+owner_member_cannot_put_custom_traits:
path: '/v1/nodes/{owner_node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *owner_member_headers
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
lessee_admin_cannot_put_custom_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_put_custom_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_put_custom_traits:
path: '/v1/nodes/{lessee_node_ident}/traits/CUSTOM_TRAIT2'
method: put
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# VIFS - https://docs.openstack.org/api-ref/baremetal/#vifs-virtual-interfaces-of-nodes
# TODO(TheJulia): VIFS will need fairly exhaustive testing given the use path.
@@ -1201,21 +1338,18 @@ owner_reader_get_vifs:
method: get
headers: *owner_reader_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_reader_get_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs'
method: get
headers: *lessee_reader_headers
assert_status: 503
- skip_reason: policy not implemented
third_party_admin_cannot_get_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
@@ -1224,7 +1358,6 @@ owner_admin_can_post_vifs:
assert_status: 503
body: &vif_body
id: ee21d58f-5de2-4956-85ff-33935ea1ca00
- skip_reason: policy not implemented
lessee_admin_can_post_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs'
@@ -1232,7 +1365,6 @@ lessee_admin_can_post_vifs:
headers: *lessee_admin_headers
assert_status: 503
body: *vif_body
- skip_reason: policy not implemented
owner_member_can_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
@@ -1240,7 +1372,6 @@ owner_member_can_post_vifs:
headers: *owner_admin_headers
assert_status: 503
body: *vif_body
- skip_reason: policy not implemented
lessee_member_cannot_post_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs'
@@ -1248,15 +1379,13 @@ lessee_member_cannot_post_vifs:
headers: *lessee_member_headers
assert_status: 403
body: *vif_body
- skip_reason: policy not implemented
owner_reader_cannot_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: post
- headers: *lessee_member_headers
+ headers: *owner_reader_headers
assert_status: 403
body: *vif_body
- skip_reason: policy not implemented
lessee_reader_cannot_post_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs'
@@ -1264,107 +1393,94 @@ lessee_reader_cannot_post_vifs:
headers: *lessee_reader_headers
assert_status: 403
body: *vif_body
- skip_reason: policy not implemented
third_party_admin_cannot_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: post
headers: *third_party_admin_headers
- assert_status: 403
+ assert_status: 404
body: *vif_body
- skip_reason: policy not implemented
owner_admin_delete_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_can_delete_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs/{vif_ident}'
method: delete
headers: *lessee_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_delete_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_delete_vifs:
path: '/v1/nodes/{lessee_node_ident}/vifs/{vif_ident}'
method: delete
- headers: *lessee_admin_headers
+ headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
owner_readers_can_get_indicators:
path: '/v1/nodes/{owner_node_ident}/management/indicators'
method: get
headers: *owner_reader_headers
- assert_status: 200
- skip_reason: policy not implemented
+ assert_status: 503
lesse_readers_can_get_indicators:
path: '/v1/nodes/{lessee_node_ident}/management/indicators'
method: get
headers: *lessee_reader_headers
- assert_status: 200
- skip_reason: policy not implemented
+ assert_status: 503
third_party_admin_cannot_get_indicators:
path: '/v1/nodes/{owner_node_ident}/management/indicators'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_reader_can_get_indicator_status:
- path: '/v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}'
+ path: '/v1/nodes/{owner_node_ident}/management/indicators/{ind_component}/{ind_ident}'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ skip_reason: API appears to be broken and should be patched outside of this work.
-lessee_reader_can_get_indicator_status:
- path: '/v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}'
+lessee_reader_not_get_indicator_status:
+ path: '/v1/nodes/{lessee_node_ident}/management/indicators/{ind_component}/{ind_ident}'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ skip_reason: API appears to be broken and should be patched outside of this work.
owner_member_can_set_indicator:
- path: '/v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}'
+ path: '/v1/nodes/{owner_node_ident}/management/indicators/{ind_component}/{ind_ident}'
method: put
- headers: *owner_reader_headers
- assert_status: 201
- skip_reason: policy not implemented
+ headers: *owner_member_headers
+ assert_status: 503
-lessee_member_can_set_indicator:
- path: '/v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}'
+lessee_member_cannot_set_indicator:
+ path: '/v1/nodes/{lessee_node_ident}/management/indicators/{ind_component}/{ind_ident}'
method: put
- headers: *lessee_reader_headers
- assert_status: 201
- skip_reason: policy not implemented
+ headers: *lessee_member_headers
+ assert_status: 403
third_party_admin_cannot_set_indicator:
- path: '/v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}'
+ path: '/v1/nodes/{node_ident}/management/indicators/{ind_component}/{ind_ident}'
method: put
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Portgroups - https://docs.openstack.org/api-ref/baremetal/#portgroups-portgroups
@@ -1375,44 +1491,42 @@ owner_reader_can_list_portgroups:
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ assert_list_length:
+ portgroups: 2
lessee_reader_can_list_portgroups:
path: '/v1/portgroups'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ assert_list_length:
+ portgroups: 1
third_party_admin_cannot_list_portgroups:
path: '/v1/portgroups'
method: get
headers: *third_party_admin_headers
- assert_status: 99 # TODO This should be 200
+ assert_status: 200
assert_list_length:
portgroups: 0
- skip_reason: policy not implemented
owner_reader_can_read_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_read_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_read_portgroup:
- path: '/v1/portgroups/{portgroup_ident}'
+ path: '/v1/portgroups/{owner_portgroup_ident}'
method: get
headers: *third_party_admin_headers
- assert_status: 200
- skip_reason: policy not implemented
+ assert_status: 404
# NB: Ports have to be posted with a node UUID to associate to,
# so that seems policy-check-able.
@@ -1421,9 +1535,8 @@ owner_admin_can_add_portgroup:
method: post
headers: *owner_admin_headers
body: &owner_portgroup_body
- node_uuid: 18a552fb-dcd2-43bf-9302-e4c93287be11
+ node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
assert_status: 201
- skip_reason: policy not implemented
owner_member_cannot_add_portgroup:
path: '/v1/portgroups'
@@ -1431,16 +1544,14 @@ owner_member_cannot_add_portgroup:
headers: *owner_member_headers
body: *owner_portgroup_body
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_add_portgroup:
path: '/v1/portgroups'
method: post
headers: *lessee_admin_headers
body: &lessee_portgroup_body
- node_uuid: 18a552fb-dcd2-43bf-9302-e4c93287be11
+ node_uuid: 38d5abed-c585-4fce-a57e-a2ffc2a2ec6f
assert_status: 403
- skip_reason: policy not implemented
# TODO, likely will need separate port/port groups established for the tests
@@ -1450,7 +1561,6 @@ lessee_member_cannot_add_portgroup:
headers: *lessee_member_headers
body: *lessee_portgroup_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_add_portgroup:
path: '/v1/portgroups'
@@ -1458,7 +1568,6 @@ third_party_admin_cannot_add_portgroup:
headers: *third_party_admin_headers
body: *lessee_portgroup_body
assert_status: 403
- skip_reason: policy not implemented
owner_admin_can_modify_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}'
@@ -1469,15 +1578,13 @@ owner_admin_can_modify_portgroup:
path: /extra
value: {'test': 'testing'}
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_modify_portgroup:
+owner_member_cannot_modify_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}'
method: patch
headers: *owner_member_headers
body: *portgroup_patch_body
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
lessee_admin_cannot_modify_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
@@ -1485,7 +1592,6 @@ lessee_admin_cannot_modify_portgroup:
headers: *lessee_admin_headers
body: *portgroup_patch_body
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_modify_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
@@ -1493,50 +1599,43 @@ lessee_member_cannot_modify_portgroup:
headers: *lessee_member_headers
body: *portgroup_patch_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_modify_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: patch
headers: *third_party_admin_headers
body: *portgroup_patch_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_delete_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_cannot_delete_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}'
method: delete
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Portgroups by node - https://docs.openstack.org/api-ref/baremetal/#listing-portgroups-by-node-nodes-portgroups
@@ -1544,22 +1643,19 @@ owner_reader_can_get_node_portgroups:
path: '/v1/nodes/{owner_node_ident}/portgroups'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 200
lessee_reader_can_get_node_porgtroups:
path: '/v1/nodes/{lessee_node_ident}/portgroups'
method: get
headers: *lessee_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 200
third_party_admin_cannot_get_portgroups:
path: '/v1/nodes/{lessee_node_ident}/portgroups'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Ports - https://docs.openstack.org/api-ref/baremetal/#ports-ports
@@ -1570,43 +1666,43 @@ owner_reader_can_list_ports:
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ # Two ports on the owned node and one on the leased node are visible; one other port is not.
+ assert_list_length:
+ ports: 3
lessee_reader_can_list_ports:
path: '/v1/ports'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ assert_list_length:
+ ports: 1
third_party_admin_cannot_list_ports:
path: '/v1/ports'
method: get
headers: *third_party_admin_headers
- assert_status: 99 # TODO This should be 200
- # TODO(Assert list has zero members!)
- skip_reason: policy not implemented
+ assert_status: 200
+ assert_list_length:
+ ports: 0
owner_reader_can_read_port:
path: '/v1/ports/{owner_port_ident}'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_read_port:
path: '/v1/ports/{lessee_port_ident}'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_read_port:
- path: '/v1/ports/{port_ident}'
+ path: '/v1/ports/{other_port_ident}'
method: get
headers: *third_party_admin_headers
- assert_status: 200
- skip_reason: policy not implemented
+ assert_status: 404
# NB: Ports have to be posted with a node UUID to associate to,
# so that seems policy-check-able.
@@ -1615,10 +1711,18 @@ owner_admin_can_add_ports:
method: post
headers: *owner_admin_headers
body: &owner_port_body
- node_uuid: 18a552fb-dcd2-43bf-9302-e4c93287be11
+ node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
address: 00:01:02:03:04:05
- assert_status: 201
- skip_reason: policy not implemented
+ assert_status: 503
+
+owner_admin_cannot_add_ports_to_other_nodes:
+ path: '/v1/ports'
+ method: post
+ headers: *owner_admin_headers
+ body:
+ node_uuid: 573208e5-cd41-4e26-8f06-ef44022b3793
+ address: 09:01:02:03:04:09
+ assert_status: 403
owner_member_cannot_add_port:
path: '/v1/ports'
@@ -1626,19 +1730,15 @@ owner_member_cannot_add_port:
headers: *owner_member_headers
body: *owner_port_body
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_add_port:
path: '/v1/ports'
method: post
headers: *lessee_admin_headers
body: &lessee_port_body
- node_uuid: 18a552fb-dcd2-43bf-9302-e4c93287be11
+ node_uuid: 38d5abed-c585-4fce-a57e-a2ffc2a2ec6f
address: 00:01:02:03:04:05
assert_status: 403
- skip_reason: policy not implemented
-
-# TODO, likely will need separate port/port groups established for the tests
lessee_member_cannot_add_port:
path: '/v1/ports'
@@ -1646,7 +1746,6 @@ lessee_member_cannot_add_port:
headers: *lessee_member_headers
body: *lessee_port_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_add_port:
path: '/v1/ports'
@@ -1654,7 +1753,6 @@ third_party_admin_cannot_add_port:
headers: *third_party_admin_headers
body: *lessee_port_body
assert_status: 403
- skip_reason: policy not implemented
owner_admin_can_modify_port:
path: '/v1/ports/{owner_port_ident}'
@@ -1665,15 +1763,13 @@ owner_admin_can_modify_port:
path: /extra
value: {'test': 'testing'}
assert_status: 503
- skip_reason: policy not implemented
-owner_member_can_modify_port:
+owner_member_cannot_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
headers: *owner_member_headers
body: *port_patch_body
- assert_status: 503
- skip_reason: policy not implemented
+ assert_status: 403
lessee_admin_cannot_modify_port:
path: '/v1/ports/{lessee_port_ident}'
@@ -1681,7 +1777,6 @@ lessee_admin_cannot_modify_port:
headers: *lessee_admin_headers
body: *port_patch_body
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_modify_port:
path: '/v1/ports/{lessee_port_ident}'
@@ -1689,50 +1784,43 @@ lessee_member_cannot_modify_port:
headers: *lessee_member_headers
body: *port_patch_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_modify_port:
path: '/v1/ports/{lessee_port_ident}'
method: patch
headers: *third_party_admin_headers
body: *port_patch_body
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
owner_admin_can_delete_port:
path: '/v1/ports/{owner_port_ident}'
method: delete
headers: *owner_admin_headers
assert_status: 503
- skip_reason: policy not implemented
owner_member_cannot_delete_port:
path: '/v1/ports/{owner_port_ident}'
method: delete
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_delete_port:
path: '/v1/ports/{lessee_port_ident}'
method: delete
headers: *lessee_admin_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_delete_port:
path: '/v1/ports/{lessee_port_ident}'
method: delete
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_port:
path: '/v1/ports/{lessee_port_ident}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Ports by node - https://docs.openstack.org/api-ref/baremetal/#listing-ports-by-node-nodes-ports
@@ -1740,47 +1828,45 @@ owner_reader_can_get_node_ports:
path: '/v1/nodes/{owner_node_ident}/ports'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 200
+ assert_list_length:
+ ports: 2
-lessee_reader_can_get_node_porgtroups:
+lessee_reader_can_get_node_port:
path: '/v1/nodes/{lessee_node_ident}/ports'
method: get
headers: *lessee_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 200
+ assert_list_length:
+ ports: 1
third_party_admin_cannot_get_ports:
path: '/v1/nodes/{lessee_node_ident}/ports'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Ports by portgroup - https://docs.openstack.org/api-ref/baremetal/#listing-ports-by-portgroup-portgroup-ports
-# Based on potgroups_ports_get* tests
+# Based on portgroups_ports_get* tests
owner_reader_can_get_ports_by_portgroup:
path: '/v1/portgroups/{owner_portgroup_ident}/ports'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_ports_by_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}/ports'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_ports_by_portgroup:
path: '/v1/portgroups/{other_portgroup_ident}/ports'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Volume(s) - https://docs.openstack.org/api-ref/baremetal/#volume-volume
# TODO(TheJulia): volumes will likely need some level of exhaustive testing.
@@ -1796,7 +1882,6 @@ owner_reader_can_list_volume_connectors:
assert_status: 200
assert_list_length:
connectors: 2
- skip_reason: policy not implemented
lessee_reader_can_list_volume_connectors:
path: '/v1/volume/connectors'
@@ -1805,27 +1890,24 @@ lessee_reader_can_list_volume_connectors:
assert_status: 200
assert_list_length:
connectors: 1
- skip_reason: policy not implemented
third_party_admin_cannot_get_connector_list:
- path: '/v1/volume/targets'
+ path: '/v1/volume/connectors'
method: get
headers: *third_party_admin_headers
assert_status: 200
assert_list_length:
connectors: 0
- skip_reason: policy not implemented
owner_admin_can_post_volume_connector:
path: '/v1/volume/connectors'
method: post
- headers: *owner_reader_headers
- assert_status: 400
+ headers: *owner_admin_headers
+ assert_status: 201
body: &volume_connector_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
type: ip
connector_id: 192.168.1.100
- skip_reason: policy not implemented
lessee_admin_cannot_post_volume_connector:
path: '/v1/volume/connectors'
@@ -1833,7 +1915,6 @@ lessee_admin_cannot_post_volume_connector:
headers: *lessee_admin_headers
assert_status: 403
body: *volume_connector_body
- skip_reason: policy not implemented
third_party_admin_cannot_post_volume_connector:
path: '/v1/volume/connectors'
@@ -1841,28 +1922,24 @@ third_party_admin_cannot_post_volume_connector:
headers: *third_party_admin_headers
assert_status: 403
body: *volume_connector_body
- skip_reason: policy not implemented
owner_reader_can_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
lessee_member_cannot_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1873,7 +1950,6 @@ lessee_member_cannot_patch_volume_connectors:
path: /extra
value: {'test': 'testing'}
assert_status: 403
- skip_reason: policy not implemented
owner_admin_can_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1881,7 +1957,6 @@ owner_admin_can_patch_volume_connectors:
headers: *owner_member_headers
body: *connector_patch_body
assert_status: 503
- skip_reason: policy not implemented
lessee_admin_cannot_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1889,7 +1964,6 @@ lessee_admin_cannot_patch_volume_connectors:
headers: *owner_member_headers
body: *connector_patch_body
assert_status: 503
- skip_reason: policy not implemented
owner_member_can_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1897,7 +1971,6 @@ owner_member_can_patch_volume_connectors:
headers: *owner_member_headers
body: *connector_patch_body
assert_status: 503
- skip_reason: policy not implemented
lessee_member_cannot_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1905,7 +1978,6 @@ lessee_member_cannot_patch_volume_connectors:
headers: *lessee_member_headers
body: *connector_patch_body
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_patch_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
@@ -1913,28 +1985,24 @@ third_party_admin_cannot_patch_volume_connectors:
headers: *third_party_admin_headers
body: *connector_patch_body
assert_status: 404
- skip_reason: policy not implemented
owner_admin_can_delete_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
headers: *owner_reader_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_admin_cannot_delete_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
headers: *lessee_reader_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Volume targets
@@ -1946,7 +2014,6 @@ owner_reader_can_get_targets:
assert_status: 200
assert_list_length:
targets: 2
- skip_reason: policy not implemented
lesse_reader_can_get_targets:
path: '/v1/volume/targets'
@@ -1955,7 +2022,6 @@ lesse_reader_can_get_targets:
assert_status: 200
assert_list_length:
targets: 1
- skip_reason: policy not implemented
third_party_admin_cannot_get_target_list:
path: '/v1/volume/targets'
@@ -1964,56 +2030,58 @@ third_party_admin_cannot_get_target_list:
assert_status: 200
assert_list_length:
targets: 0
- skip_reason: policy not implemented
owner_reader_can_get_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
+ assert_dict_contains:
+ # This helps assert that the field has been redacted.
+ properties:
+ redacted_contents: '** Value redacted: Requires permission baremetal:volume:view_target_properties access. Permission denied. **'
+
lessee_reader_can_get_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
owner_admin_create_volume_target:
path: '/v1/volume/targets'
method: post
headers: *owner_admin_headers
- assert_status: 400
+ assert_status: 201
body: &volume_target_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
volume_type: iscsi
- boot_index: 0
+ boot_index: 2
volume_id: 'test-id'
- skip_reason: policy not implemented
lessee_admin_create_volume_target:
path: '/v1/volume/targets'
method: post
headers: *owner_admin_headers
- assert_status: 400
- body: *volume_target_body
- skip_reason: policy not implemented
+ assert_status: 201
+ body:
+ node_uuid: 38d5abed-c585-4fce-a57e-a2ffc2a2ec6f
+ volume_type: iscsi
+ boot_index: 2
+ volume_id: 'test-id2'
third_party_admin_cannot_create_volume_target:
path: '/v1/volume/targets'
method: post
- headers: *owner_admin_headers
- assert_status: 400
+ headers: *third_party_admin_headers
+ assert_status: 403
body: *volume_target_body
- skip_reason: policy not implemented
owner_member_can_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
@@ -2022,16 +2090,22 @@ owner_member_can_patch_volume_target:
- op: replace
path: /extra
value: {'test': 'testing'}
- assert_status: 403
- skip_reason: policy not implemented
+ headers: *owner_member_headers
+ assert_status: 503
-lessee_member_can_patch_volume_target:
+lessee_admin_can_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: patch
body: *volume_target_patch
- headers: *lessee_member_headers
+ headers: *lessee_admin_headers
assert_status: 503
- skip_reason: policy not implemented
+
+lessee_member_cannot_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *lessee_member_headers
+ assert_status: 403
third_party_admin_cannot_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
@@ -2039,86 +2113,74 @@ third_party_admin_cannot_patch_volume_target:
body: *volume_target_patch
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
owner_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *owner_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 503
lessee_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *lessee_admin_headers
- assert_status: 201
- skip_reason: policy not implemented
+ assert_status: 503
owner_member_cannot_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *owner_member_headers
assert_status: 403
- skip_reason: policy not implemented
lessee_member_cannot_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *lessee_member_headers
assert_status: 403
- skip_reason: policy not implemented
third_party_admin_cannot_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 404
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
owner_reader_can_get_volume_connectors:
- path: '/v1/nodes/{node_ident}/volume/connectors'
+ path: '/v1/nodes/{owner_node_ident}/volume/connectors'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_node_volume_connectors:
- path: '/v1/nodes/{node_ident}/volume/connectors'
+ path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_node_volume_connectors:
- path: '/v1/nodes/{node_ident}/volume/connectors'
+ path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
method: get
headers: *third_party_admin_headers
- assert_status: 200
- skip_reason: policy not implemented
+ assert_status: 404
owner_reader_can_get_node_volume_targets:
- path: '/v1/nodes/{node_ident}/volume/targets'
+ path: '/v1/nodes/{owner_node_ident}/volume/targets'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_node_volume_targets:
- path: '/v1/nodes/{node_ident}/volume/targets'
+ path: '/v1/nodes/{lessee_node_ident}/volume/targets'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_part_admin_cannot_read_node_volume_targets:
- path: '/v1/nodes/{node_ident}/volume/targets'
+ path: '/v1/nodes/{lessee_node_ident}/volume/targets'
method: get
- headers: *owner_reader_headers
+ headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
# Drivers - https://docs.openstack.org/api-ref/baremetal/#drivers-drivers
@@ -2150,45 +2212,39 @@ owner_reader_cannot_get_drivers_vendor_passthru:
path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
method: get
headers: *owner_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
lessee_reader_cannot_get_drivers_vendor_passthru:
path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
method: get
headers: *lessee_reader_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
third_party_admin_cannot_get_drivers_vendor_passthru:
path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
method: get
headers: *third_party_admin_headers
- assert_status: 403
- skip_reason: policy not implemented
+ assert_status: 500
# Node Bios - https://docs.openstack.org/api-ref/baremetal/#node-bios-nodes
owner_reader_can_get_bios_setttings:
- path: '/v1/nodes/{node_ident}/bios'
+ path: '/v1/nodes/{owner_node_ident}/bios'
method: get
headers: *owner_reader_headers
assert_status: 200
- skip_reason: policy not implemented
lessee_reader_can_get_bios_settings:
- path: '/v1/nodes/{node_ident}/bios'
+ path: '/v1/nodes/{lessee_node_ident}/bios'
method: get
headers: *lessee_reader_headers
assert_status: 200
- skip_reason: policy not implemented
third_party_admin_cannot_get_bios_settings:
path: '/v1/nodes/{owner_node_ident}/bios'
method: get
headers: *third_party_admin_headers
assert_status: 404
- skip_reason: policy not implemented
# Conductors - https://docs.openstack.org/api-ref/baremetal/#allocations-allocations
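
# Editor's illustration (not part of the patch): the project-scoped cases above follow one
# consistent pattern -- a caller inside the owning or leasing project but lacking the needed
# role gets 403, while a caller from an unrelated project gets 404, so the resource is never
# even revealed to outsiders. A minimal sketch of that ordering follows; the helper name and
# attributes are assumptions for illustration only, not Ironic's actual API code.

    from webob import exc

    def fetch_for_project(resource, context, role_allowed):
        # Hypothetical helper: hide the resource entirely from unrelated
        # projects before doing any role-based check.
        if context.project_id not in (resource.owner, resource.lessee):
            raise exc.HTTPNotFound()     # outsiders learn nothing exists
        if not role_allowed:
            raise exc.HTTPForbidden()    # insiders without the role get 403
        return resource
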
diff --git a/ironic/tests/unit/api/test_rbac_system_scoped.yaml b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
index a48dcb9be..c0126b04d 100644
--- a/ironic/tests/unit/api/test_rbac_system_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
@@ -147,6 +147,26 @@ nodes_node_ident_patch_admin:
value: {'test': 'testing'}
assert_status: 503
+system_admin_can_patch_chassis:
+ path: '/v1/nodes/{node_ident}'
+ method: patch
+ headers: *admin_headers
+ body:
+ - op: replace
+ path: /chassis_uuid
+ value: 'e74c40e0-d825-11e2-a28f-0800200c9a66'
+ assert_status: 503
+
+system_member_can_patch_conductor_group:
+ path: '/v1/nodes/{node_ident}'
+ method: patch
+ headers: *scoped_member_headers
+ body:
+ - op: replace
+ path: /conductor_group
+ value: "DC04-ROW39"
+ assert_status: 503
+
nodes_node_ident_patch_member:
path: '/v1/nodes/{node_ident}'
method: patch
@@ -921,7 +941,9 @@ ports_post_member:
method: post
headers: *scoped_member_headers
assert_status: 403
- body: *port_body
+ body:
+ node_uuid: 22e26c0b-03f2-4d2e-ae87-c02d7f33c000
+ address: 03:04:05:06:07:08
ports_post_reader:
path: '/v1/ports'
@@ -1138,9 +1160,9 @@ volume_connectors_post_admin:
path: '/v1/volume/connectors'
method: post
headers: *admin_headers
- assert_status: 400
+ assert_status: 201
body: &volume_connector_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1be26c0b-03f2-4d2e-ae87-c02d7f33c123
type: ip
connector_id: 192.168.1.100
@@ -1150,7 +1172,7 @@ volume_connectors_post_member:
path: '/v1/volume/connectors'
method: post
headers: *scoped_member_headers
- assert_status: 400
+ assert_status: 201
body: *volume_connector_body
volume_connectors_post_reader:
@@ -1247,19 +1269,23 @@ volume_targets_post_admin:
path: '/v1/volume/targets'
method: post
headers: *admin_headers
- assert_status: 400
+ assert_status: 201
body: &volume_target_body
- node_uuid: 68a552fb-dcd2-43bf-9302-e4c93287be16
+ node_uuid: 1be26c0b-03f2-4d2e-ae87-c02d7f33c123
volume_type: iscsi
- boot_index: 0
+ boot_index: 1
volume_id: 'test-id'
volume_targets_post_member:
path: '/v1/volume/targets'
method: post
headers: *scoped_member_headers
- assert_status: 400
- body: *volume_target_body
+ assert_status: 201
+ body:
+ node_uuid: 1be26c0b-03f2-4d2e-ae87-c02d7f33c123
+ volume_type: iscsi
+ boot_index: 2
+ volume_id: 'test-id2'
volume_targets_post_reader:
path: '/v1/volume/targets'
diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py
index 3d9fb3e90..77626a084 100644
--- a/ironic/tests/unit/common/test_driver_factory.py
+++ b/ironic/tests/unit/common/test_driver_factory.py
@@ -214,6 +214,26 @@ class CheckAndUpdateNodeInterfacesTestCase(db_base.DbTestCase):
driver_factory.check_and_update_node_interfaces,
node)
+ def test_create_node_valid_network_interface_instance_info_override(self):
+ instance_info = {'network_interface': 'noop',
+ 'storage_interface': 'noop'}
+ node = obj_utils.get_test_node(self.context,
+ instance_info=instance_info)
+ self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
+ self.assertIsNone(node.network_interface)
+ self.assertIsNone(node.storage_interface)
+ self.assertEqual('noop', node.instance_info.get('network_interface'))
+ self.assertEqual('noop', node.instance_info.get('storage_interface'))
+
+ def test_create_node_invalid_network_interface_instance_info_override(
+ self):
+ instance_info = {'network_interface': 'banana'}
+ node = obj_utils.get_test_node(self.context,
+ instance_info=instance_info)
+ self.assertRaises(exception.InterfaceNotFoundInEntrypoint,
+ driver_factory.check_and_update_node_interfaces,
+ node)
+
def _get_valid_default_interface_name(self, iface):
i_name = 'fake'
# there is no 'fake' network interface
@@ -506,6 +526,17 @@ class HardwareTypeLoadTestCase(db_base.DbTestCase):
self.assertRaises(exception.InterfaceNotFoundInEntrypoint,
task_manager.acquire, self.context, node.id)
+ def test_build_driver_for_task_instance_info_override(self):
+ self.config(enabled_network_interfaces=['noop', 'neutron'])
+ instance_info = {'network_interface': 'neutron'}
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ instance_info=instance_info,
+ **self.node_kwargs)
+ with task_manager.acquire(self.context, node.id) as task:
+ self.assertEqual(
+ getattr(task.driver, 'network').__class__.__name__,
+ 'NeutronNetwork')
+
def test_no_storage_interface(self):
node = obj_utils.get_test_node(self.context)
self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
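
# Editor's illustration (not part of the patch): the two new driver_factory tests above rely
# on instance_info being able to override the interface recorded on the node -- valid names
# are honoured, unknown ones raise InterfaceNotFoundInEntrypoint. A rough sketch of that
# precedence, with hypothetical field and function names:

    def resolve_interface(node, kind):
        # Hypothetical lookup order: instance_info override first, then the
        # value stored on the node object itself.
        field = '%s_interface' % kind          # e.g. 'network_interface'
        return node.instance_info.get(field) or getattr(node, field, None)
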
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index c5be259b1..ce56eb276 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -103,6 +103,17 @@ class TestPXEUtils(db_base.DbTestCase):
'password': 'fake_password',
})
+ self.ipxe_options_boot_from_volume_multipath = \
+ self.ipxe_options.copy()
+ self.ipxe_options_boot_from_volume_multipath.update({
+ 'boot_from_volume': True,
+ 'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn '
+ 'iscsi:faker_host::3260:0:fake_iqn',
+ 'iscsi_initiator_iqn': 'fake_iqn',
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ })
+
self.ipxe_options_boot_from_volume_no_extra_volume.pop(
'initrd_filename', None)
self.ipxe_options_boot_from_volume_extra_volume.pop(
@@ -183,6 +194,25 @@ class TestPXEUtils(db_base.DbTestCase):
self.assertEqual(str(expected_template), rendered_template)
+ def test_default_ipxe_boot_from_volume_config_multipath(self):
+ self.config(
+ pxe_config_template='ironic/drivers/modules/ipxe_config.template',
+ group='pxe'
+ )
+ self.config(http_url='http://1.2.3.4:1234', group='deploy')
+ rendered_template = utils.render_template(
+ CONF.pxe.pxe_config_template,
+ {'pxe_options': self.ipxe_options_boot_from_volume_multipath,
+ 'ROOT': '{{ ROOT }}',
+ 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
+
+ templ_file = 'ironic/tests/unit/drivers/' \
+ 'ipxe_config_boot_from_volume_multipath.template'
+ with open(templ_file) as f:
+ expected_template = f.read().rstrip()
+
+ self.assertEqual(str(expected_template), rendered_template)
+
def test_default_ipxe_boot_from_volume_config(self):
self.config(
pxe_config_template='ironic/drivers/modules/ipxe_config.template',
@@ -1492,7 +1522,8 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
debug=False,
boot_from_volume=False,
mode='deploy',
- iso_boot=False):
+ iso_boot=False,
+ multipath=False):
self.config(debug=debug)
self.config(pxe_append_params='test_param', group='pxe')
self.config(ipxe_timeout=ipxe_timeout, group='pxe')
@@ -1587,7 +1618,6 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
if boot_from_volume:
expected_options.update({
'boot_from_volume': True,
- 'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
'iscsi_initiator_iqn': 'fake_iqn_initiator',
'iscsi_volumes': [{'url': 'iscsi:fake_host::3260:1:fake_iqn',
'username': 'fake_username_1',
@@ -1596,6 +1626,15 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'username': 'fake_username',
'password': 'fake_password'
})
+ if multipath:
+ expected_options.update({
+ 'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn '
+ 'iscsi:faker_host::3261:0:fake_iqn',
+ })
+ else:
+ expected_options.update({
+ 'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
+ })
expected_options.pop('deployment_aki_path')
expected_options.pop('deployment_ari_path')
expected_options.pop('initrd_filename')
@@ -1701,7 +1740,8 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'auth_username': 'fake_username_1',
'auth_password': 'fake_password_1'})
self.node.driver_internal_info.update({'boot_from_volume': vol_id})
- self._test_build_pxe_config_options_ipxe(boot_from_volume=True)
+ self._test_build_pxe_config_options_ipxe(boot_from_volume=True,
+ multipath=True)
def test_get_volume_pxe_options(self):
vol_id = uuidutils.generate_uuid()
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 348c94a39..f332faf46 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -3391,9 +3391,9 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
console_enabled=True)
self._start_service()
self.service.set_console_mode(self.context, node.uuid, True)
- self._stop_service()
self.assertFalse(mock_sc.called)
self.assertFalse(mock_notify.called)
+ self._stop_service()
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
diff --git a/ironic/tests/unit/conductor/test_task_manager.py b/ironic/tests/unit/conductor/test_task_manager.py
index 6421eff4f..c9efb17ae 100644
--- a/ironic/tests/unit/conductor/test_task_manager.py
+++ b/ironic/tests/unit/conductor/test_task_manager.py
@@ -114,7 +114,13 @@ class TaskManagerTestCase(db_base.DbTestCase):
get_voltgt_mock.return_value = mock.sentinel.voltgt1
build_driver_mock.return_value = mock.sentinel.driver1
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_all(task):
+ return task.ports, task.portgroups, task.volume_targets, \
+ task.volume_connectors
+
with task_manager.TaskManager(self.context, 'node-id1') as task:
+ _eval_all(task)
reserve_mock.return_value = node2
get_ports_mock.return_value = mock.sentinel.ports2
get_portgroups_mock.return_value = mock.sentinel.portgroups2
@@ -122,6 +128,7 @@ class TaskManagerTestCase(db_base.DbTestCase):
get_voltgt_mock.return_value = mock.sentinel.voltgt2
build_driver_mock.return_value = mock.sentinel.driver2
with task_manager.TaskManager(self.context, 'node-id2') as task2:
+ _eval_all(task2)
self.assertEqual(self.context, task.context)
self.assertEqual(self.node, task.node)
self.assertEqual(mock.sentinel.ports1, task.ports)
@@ -274,16 +281,18 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id')
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_ports(task):
+ return task.ports
+
+ with task_manager.TaskManager(self.context, 'fake-node-id') as task:
+ self.assertRaises(exception.IronicException, _eval_ports, task)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
@@ -294,16 +303,19 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.return_value = self.node
get_portgroups_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id')
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_portgroups(task):
+ return task.portgroups
+
+ with task_manager.TaskManager(self.context, 'fake-node-id') as task:
+ self.assertRaises(exception.IronicException, _eval_portgroups,
+ task)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
@@ -314,15 +326,18 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.return_value = self.node
get_volconn_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id')
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_volconn(task):
+ return task.volume_connectors
+
+ with task_manager.TaskManager(self.context, 'fake-node-id') as task:
+ self.assertRaises(exception.IronicException, _eval_volconn,
+ task)
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
@@ -334,15 +349,17 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.return_value = self.node
get_voltgt_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id')
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_voltgt(task):
+ return task.volume_targets
+
+ with task_manager.TaskManager(self.context, 'fake-node-id') as task:
+ self.assertRaises(exception.IronicException, _eval_voltgt, task)
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
@@ -363,8 +380,10 @@ class TaskManagerTestCase(db_base.DbTestCase):
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
reserve_mock.assert_called_once_with(self.context, self.host,
'fake-node-id')
- get_ports_mock.assert_called_once_with(self.context, self.node.id)
- get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
+ self.assertFalse(get_ports_mock.called)
+ self.assertFalse(get_portgroups_mock.called)
+ self.assertFalse(get_volconn_mock.called)
+ self.assertFalse(get_voltgt_mock.called)
build_driver_mock.assert_called_once_with(mock.ANY)
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
@@ -424,17 +443,19 @@ class TaskManagerTestCase(db_base.DbTestCase):
node_get_mock.return_value = self.node
get_ports_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id',
- shared=True)
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_ports(task):
+ return task.ports
+
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ shared=True) as task:
+ self.assertRaises(exception.IronicException, _eval_ports, task)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_ports_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
def test_shared_lock_get_portgroups_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
@@ -443,17 +464,20 @@ class TaskManagerTestCase(db_base.DbTestCase):
node_get_mock.return_value = self.node
get_portgroups_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id',
- shared=True)
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_portgroups(task):
+ return task.portgroups
+
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ shared=True) as task:
+ self.assertRaises(exception.IronicException, _eval_portgroups,
+ task)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
def test_shared_lock_get_volconn_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
@@ -462,17 +486,19 @@ class TaskManagerTestCase(db_base.DbTestCase):
node_get_mock.return_value = self.node
get_volconn_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id',
- shared=True)
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_volconn(task):
+ return task.volume_connectors
+
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ shared=True) as task:
+ self.assertRaises(exception.IronicException, _eval_volconn, task)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_volconn_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(get_voltgt_mock.called)
+ self.assertTrue(build_driver_mock.called)
def test_shared_lock_get_voltgt_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
@@ -481,17 +507,19 @@ class TaskManagerTestCase(db_base.DbTestCase):
node_get_mock.return_value = self.node
get_voltgt_mock.side_effect = exception.IronicException('foo')
- self.assertRaises(exception.IronicException,
- task_manager.TaskManager,
- self.context,
- 'fake-node-id',
- shared=True)
+ # Note(arne_wiebalck): Force loading of lazy-loaded properties.
+ def _eval_voltgt(task):
+ return task.volume_targets
+
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ shared=True) as task:
+ self.assertRaises(exception.IronicException, _eval_voltgt, task)
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
- self.assertFalse(build_driver_mock.called)
+ self.assertTrue(build_driver_mock.called)
def test_shared_lock_build_driver_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
@@ -510,10 +538,10 @@ class TaskManagerTestCase(db_base.DbTestCase):
self.assertFalse(reserve_mock.called)
self.assertFalse(release_mock.called)
node_get_mock.assert_called_once_with(self.context, 'fake-node-id')
- get_ports_mock.assert_called_once_with(self.context, self.node.id)
- get_portgroups_mock.assert_called_once_with(self.context, self.node.id)
- get_volconn_mock.assert_called_once_with(self.context, self.node.id)
- get_voltgt_mock.assert_called_once_with(self.context, self.node.id)
+ self.assertFalse(get_ports_mock.called)
+ self.assertFalse(get_portgroups_mock.called)
+ self.assertFalse(get_voltgt_mock.called)
+ self.assertFalse(get_volconn_mock.called)
build_driver_mock.assert_called_once_with(mock.ANY)
def test_upgrade_lock(
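
# Editor's illustration (not part of the patch): the rewritten task_manager tests above
# reflect that ports, portgroups, volume connectors and volume targets are now fetched
# lazily -- the driver is built when the task is created, and the database lookups (and any
# exceptions they raise) only happen on first attribute access, which is why the helpers
# explicitly touch task.ports and friends. A minimal sketch of that lazy-property pattern,
# with illustrative names only (this is not Ironic's TaskManager):

    class LazyTask:
        def __init__(self, context, node_id, dbapi):
            self.context = context
            self.node_id = node_id
            self._dbapi = dbapi
            self._ports = None      # nothing fetched at construction time

        @property
        def ports(self):
            # The lookup (and any exception it raises) happens on first
            # access; the result is cached for later reads.
            if self._ports is None:
                self._ports = self._dbapi.get_ports_by_node_id(
                    self.context, self.node_id)
            return self._ports
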
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template
new file mode 100644
index 000000000..f5027b3af
--- /dev/null
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_volume_multipath.template
@@ -0,0 +1,56 @@
+#!ipxe
+
+set attempts:int32 10
+set i:int32 0
+
+goto deploy
+
+:deploy
+imgfree
+kernel http://1.2.3.4:1234/deploy_kernel selinux=0 troubleshoot=0 text test_param BOOTIF=${mac} initrd=deploy_ramdisk || goto retry
+
+initrd http://1.2.3.4:1234/deploy_ramdisk || goto retry
+boot
+
+:retry
+iseq ${i} ${attempts} && goto fail ||
+inc i
+echo No response, retrying in ${i} seconds.
+sleep ${i}
+goto deploy
+
+:fail
+echo Failed to get a response after ${attempts} attempts
+echo Powering off in 30 seconds.
+sleep 30
+poweroff
+
+:boot_partition
+imgfree
+kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
+initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
+boot
+
+:boot_ramdisk
+imgfree
+kernel http://1.2.3.4:1234/kernel root=/dev/ram0 text test_param ramdisk_param initrd=ramdisk || goto boot_ramdisk
+initrd http://1.2.3.4:1234/ramdisk || goto boot_ramdisk
+boot
+
+:boot_iscsi
+imgfree
+set username fake_username
+set password fake_password
+set initiator-iqn fake_iqn
+sanhook --drive 0x80 iscsi:fake_host::3260:0:fake_iqn iscsi:faker_host::3260:0:fake_iqn || goto fail_iscsi_retry
+
+
+sanboot --no-describe || goto fail_iscsi_retry
+
+:fail_iscsi_retry
+echo Failed to attach iSCSI volume(s), retrying in 10 seconds.
+sleep 10
+goto boot_iscsi
+
+:boot_whole_disk
+sanboot --no-describe
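
# Editor's illustration (not part of the patch): the new template above passes two
# space-separated iSCSI URIs to a single sanhook call so iPXE can try every portal that
# exposes the boot volume. A rough sketch of how such a multipath boot URL could be
# assembled; the function and argument names are assumptions, not Ironic's real helper.

    def build_multipath_iscsi_url(portals, lun, target_iqn):
        # portals: iterable of (host, port) pairs for the same target, e.g.
        # [('fake_host', 3260), ('faker_host', 3260)]
        return ' '.join('iscsi:%s::%d:%d:%s' % (host, port, lun, target_iqn)
                        for host, port in portals)

    # build_multipath_iscsi_url([('fake_host', 3260), ('faker_host', 3260)],
    #                           0, 'fake_iqn')
    # -> 'iscsi:fake_host::3260:0:fake_iqn iscsi:faker_host::3260:0:fake_iqn'
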
diff --git a/ironic/tests/unit/drivers/modules/drac/test_inspect.py b/ironic/tests/unit/drivers/modules/drac/test_inspect.py
index ecb9346f2..64dc6d39f 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_inspect.py
@@ -287,10 +287,10 @@ class DracInspectionTestCase(test_utils.BaseDracTest):
'pci_subdevice_id': '0737',
'pci_subvendor_id': '1028'},
{'id': 'Video.Slot.7-1',
- 'description': 'GV100GL [Tesla V100 PCIe 16GB]]',
+ 'description': 'GV100 [TITAN V]',
'function_number': 0,
'manufacturer': 'NVIDIA Corporation',
- 'pci_device_id': '1DB4',
+ 'pci_device_id': '1D81',
'pci_vendor_id': '10DE',
'pci_subdevice_id': '1214',
'pci_subvendor_id': '10DE'}]
@@ -325,6 +325,56 @@ class DracInspectionTestCase(test_utils.BaseDracTest):
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
+ def test_inspect_hardware_multiple_supported_gpu(self, mock_port_create,
+ mock_get_drac_client):
+ controllers = [
+ {'id': 'Video.Slot.7-1',
+ 'description': 'TU104GL [Tesla T4]',
+ 'function_number': 0,
+ 'manufacturer': 'NVIDIA Corporation',
+ 'pci_device_id': '1EB8',
+ 'pci_vendor_id': '10DE',
+ 'pci_subdevice_id': '12A2',
+ 'pci_subvendor_id': '10DE'},
+ {'id': 'Video.Slot.8-1',
+ 'description': 'GV100GL [Tesla V100 PCIe 16GB]',
+ 'function_number': 0,
+ 'manufacturer': 'NVIDIA Corporation',
+ 'pci_device_id': '1DB4',
+ 'pci_vendor_id': '10DE',
+ 'pci_subdevice_id': '1214',
+ 'pci_subvendor_id': '10DE'}]
+
+ expected_node_properties = {
+ 'memory_mb': 32768,
+ 'local_gb': 279,
+ 'cpus': 18,
+ 'cpu_arch': 'x86_64',
+ 'capabilities': 'boot_mode:uefi,pci_gpu_devices:2'}
+ mock_client = mock.Mock()
+ mock_get_drac_client.return_value = mock_client
+ mock_client.list_memory.return_value = self.memory
+ mock_client.list_cpus.return_value = self.cpus
+ mock_client.list_virtual_disks.return_value = []
+ mock_client.list_physical_disks.return_value = self.physical_disks
+ mock_client.list_nics.return_value = self.nics
+ mock_client.list_bios_settings.return_value = self.uefi_boot_settings
+ video_controllers = [test_utils.dict_to_namedtuple(values=vc)
+ for vc in controllers]
+ mock_client.list_video_controllers.return_value = video_controllers
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ return_value = task.driver.inspect.inspect_hardware(task)
+
+ self.node.refresh()
+ self.assertEqual(expected_node_properties, self.node.properties)
+ self.assertEqual(states.MANAGEABLE, return_value)
+ self.assertEqual(2, mock_port_create.call_count)
+
+ @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+ autospec=True)
+ @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_inspect_hardware_no_gpu(self, mock_port_create,
mock_get_drac_client):
expected_node_properties = {
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
index 373918c20..761a910e0 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
@@ -54,7 +54,9 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
def test_parse_driver_info_deploy_iso(self):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
+ self.node.driver_info['ilo_kernel_append_params'] = 'kernel-param'
expected_driver_info = {'ilo_bootloader': None,
+ 'ilo_kernel_append_params': 'kernel-param',
'ilo_deploy_iso': 'deploy-iso'}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
@@ -63,6 +65,7 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
def test_parse_driver_info_rescue_iso(self):
self.node.driver_info['ilo_rescue_iso'] = 'rescue-iso'
expected_driver_info = {'ilo_bootloader': None,
+ 'ilo_kernel_append_params': None,
'ilo_rescue_iso': 'rescue-iso'}
actual_driver_info = ilo_boot.parse_driver_info(self.node, 'rescue')
@@ -72,9 +75,11 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
self.node.driver_info['ilo_deploy_kernel'] = 'kernel'
self.node.driver_info['ilo_deploy_ramdisk'] = 'ramdisk'
self.node.driver_info['ilo_bootloader'] = 'bootloader'
+ self.node.driver_info['ilo_kernel_append_params'] = 'kernel-param'
expected_driver_info = {'ilo_deploy_kernel': 'kernel',
'ilo_deploy_ramdisk': 'ramdisk',
- 'ilo_bootloader': 'bootloader'}
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': 'kernel-param'}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
self.assertEqual(expected_driver_info, actual_driver_info)
@@ -85,7 +90,8 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
self.node.driver_info['ilo_bootloader'] = 'bootloader'
expected_driver_info = {'ilo_rescue_kernel': 'kernel',
'ilo_rescue_ramdisk': 'ramdisk',
- 'ilo_bootloader': 'bootloader'}
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': None}
actual_driver_info = ilo_boot.parse_driver_info(self.node, 'rescue')
self.assertEqual(expected_driver_info, actual_driver_info)
@@ -96,7 +102,8 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
CONF.conductor.bootloader = 'bootloader'
expected_driver_info = {'ilo_deploy_kernel': 'kernel',
'ilo_deploy_ramdisk': 'ramdisk',
- 'ilo_bootloader': 'bootloader'}
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': None}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
self.assertEqual(expected_driver_info, actual_driver_info)
@@ -108,7 +115,8 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
expected_driver_info = {'ilo_rescue_kernel': 'kernel',
'ilo_rescue_ramdisk': 'ramdisk',
- 'ilo_bootloader': 'bootloader'}
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': None}
actual_driver_info = ilo_boot.parse_driver_info(self.node, 'rescue')
self.assertEqual(expected_driver_info, actual_driver_info)
@@ -119,7 +127,8 @@ class IloBootCommonMethodsTestCase(test_common.BaseIloTest):
expected_driver_info = {'ilo_deploy_kernel': 'kernel',
'ilo_deploy_ramdisk': 'ramdisk',
- 'ilo_bootloader': None}
+ 'ilo_bootloader': None,
+ 'ilo_kernel_append_params': None}
actual_driver_info = ilo_boot.parse_driver_info(self.node)
self.assertEqual(expected_driver_info, actual_driver_info)
@@ -1535,7 +1544,8 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
deploy_info = {
'ilo_deploy_kernel': 'deploy-kernel',
'ilo_deploy_ramdisk': 'deploy-ramdisk',
- 'ilo_bootloader': 'bootloader'
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': 'nofb nomodeset vga=normal'
}
deploy_info.update({'ilo_username': 'admin',
@@ -1566,6 +1576,7 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
driver_info['ilo_rescue_ramdisk'] = 'rescue-ramdisk'
driver_info['ilo_bootloader'] = 'bootloader'
driver_info['ilo_add_certificates'] = 'false'
+ driver_info['ilo_kernel_append_params'] = 'kernel-param'
driver_info['dummy_key'] = 'dummy-value'
self.node.driver_info = driver_info
self.node.save()
@@ -1575,7 +1586,8 @@ class IloUefiHttpsBootTestCase(db_base.DbTestCase):
deploy_info = {
'ilo_rescue_kernel': 'rescue-kernel',
'ilo_rescue_ramdisk': 'rescue-ramdisk',
- 'ilo_bootloader': 'bootloader'
+ 'ilo_bootloader': 'bootloader',
+ 'ilo_kernel_append_params': 'kernel-param'
}
deploy_info.update({'ilo_username': 'admin',
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
index 9eb8dbb85..d12e1e0b3 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
@@ -218,24 +218,7 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
_inspect_hardware_mock.assert_called_once_with(task.node,
existing_traits)
- # note (naohirot):
- # as of mock 1.2, assert_has_calls has a bug which returns
- # "AssertionError: Calls not found." if mock_calls has class
- # method call such as below:
-
- # AssertionError: Calls not found.
- # Expected: [call.list_by_node_id(
- # <oslo_context.context.RequestContext object at 0x7f1a34f8c0d0>,
- # 1)]
- # Actual: [call.list_by_node_id(
- # <oslo_context.context.RequestContext object at 0x7f1a34f8c0d0>,
- # 1)]
- #
- # workaround, remove class method call from mock_calls list
- del port_mock.mock_calls[0]
port_mock.assert_has_calls([
- # workaround, comment out class method call from expected list
- # mock.call.list_by_node_id(task.context, node_id),
mock.call(task.context, address=inspected_macs[0],
node_id=node_id),
mock.call(task.context, address=inspected_macs[1],
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
new file mode 100644
index 000000000..3e46c388d
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -0,0 +1,846 @@
+# Copyright 2021 DMTF. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from oslo_utils import importutils
+from oslo_utils import units
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.conductor import utils as manager_utils
+from ironic.drivers.modules import deploy_utils
+from ironic.drivers.modules.redfish import boot as redfish_boot
+from ironic.drivers.modules.redfish import raid as redfish_raid
+from ironic.drivers.modules.redfish import utils as redfish_utils
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.db import utils as db_utils
+from ironic.tests.unit.objects import utils as obj_utils
+
+sushy = importutils.try_import('sushy')
+
+INFO_DICT = db_utils.get_test_redfish_info()
+
+
+def _mock_drive(identity, block_size_bytes=None, capacity_bytes=None,
+ media_type=None, name=None, protocol=None):
+ return mock.MagicMock(
+ _path='/redfish/v1/Systems/1/Storage/1/Drives/' + identity,
+ identity=identity,
+ block_size_bytes=block_size_bytes,
+ capacity_bytes=capacity_bytes,
+ media_type=media_type,
+ name=name,
+ protocol=protocol
+ )
+
+
+def _mock_volume(identity, volume_type=None, raid_type=None):
+ volume = mock.MagicMock(
+ _path='/redfish/v1/Systems/1/Storage/1/Volumes/' + identity,
+ identity=identity,
+ volume_type=volume_type,
+ raid_type=raid_type
+ )
+ # Mocking Immediate that does not return anything
+ volume.delete.return_value = None
+ return volume
+
+
+@mock.patch('oslo_utils.eventletutils.EventletEvent.wait',
+ lambda *args, **kwargs: None)
+@mock.patch.object(redfish_utils, 'get_system', autospec=True)
+class RedfishRAIDTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(RedfishRAIDTestCase, self).setUp()
+ self.config(enabled_hardware_types=['redfish'],
+ enabled_power_interfaces=['redfish'],
+ enabled_boot_interfaces=['redfish-virtual-media'],
+ enabled_management_interfaces=['redfish'],
+ enabled_inspect_interfaces=['redfish'],
+ enabled_bios_interfaces=['redfish'],
+ enabled_raid_interfaces=['redfish']
+ )
+ self.node = obj_utils.create_test_node(
+ self.context, driver='redfish', driver_info=INFO_DICT)
+ self.mock_storage = mock.MagicMock()
+ self.drive_id1 = '35D38F11ACEF7BD3'
+ self.drive_id2 = '3F5A8C54207B7233'
+ self.drive_id3 = '32ADF365C6C1B7BD'
+ self.drive_id4 = '3D58ECBC375FD9F2'
+ mock_drives = []
+ for i in [self.drive_id1, self.drive_id2, self.drive_id3,
+ self.drive_id4]:
+ mock_drives.append(_mock_drive(
+ identity=i, block_size_bytes=512, capacity_bytes=899527000000,
+ media_type='HDD', name='Drive', protocol='SAS'))
+ self.mock_storage.drives = mock_drives
+ mock_identifier = mock.Mock()
+ mock_identifier.durable_name = '345C59DBD970859C'
+ mock_controller = mock.Mock()
+ mock_controller.identifiers = [mock_identifier]
+ self.mock_storage.storage_controllers = [mock_controller]
+ mock_volumes = mock.MagicMock()
+ self.mock_storage.volumes = mock_volumes
+ self.free_space_bytes = {d: d.capacity_bytes for d in
+ mock_drives}
+ self.physical_disks = mock_drives
+
+ @mock.patch.object(redfish_raid, 'sushy', None)
+ def test_loading_error(self, mock_get_system):
+ self.assertRaisesRegex(
+ exception.DriverLoadError,
+ 'Unable to import the sushy library',
+ redfish_raid.RedfishRAID)
+
+ def test__max_volume_size_bytes_raid0(self, mock_get_system):
+ spans = redfish_raid._calculate_spans('0', 3)
+ max_size = redfish_raid._max_volume_size_bytes(
+ '0', self.physical_disks[0:3], self.free_space_bytes,
+ spans_count=spans)
+ self.assertEqual(2698380312576, max_size)
+
+ def test__max_volume_size_bytes_raid1(self, mock_get_system):
+ spans = redfish_raid._calculate_spans('1', 2)
+ max_size = redfish_raid._max_volume_size_bytes(
+ '1', self.physical_disks[0:2], self.free_space_bytes,
+ spans_count=spans)
+ self.assertEqual(899460104192, max_size)
+
+ def test__max_volume_size_bytes_raid5(self, mock_get_system):
+ spans = redfish_raid._calculate_spans('5', 3)
+ max_size = redfish_raid._max_volume_size_bytes(
+ '5', self.physical_disks[0:3], self.free_space_bytes,
+ spans_count=spans)
+ self.assertEqual(1798920208384, max_size)
+
+ def test__max_volume_size_bytes_raid6(self, mock_get_system):
+ spans = redfish_raid._calculate_spans('6', 4)
+ max_size = redfish_raid._max_volume_size_bytes(
+ '6', self.physical_disks[0:4], self.free_space_bytes,
+ spans_count=spans)
+ self.assertEqual(1798920208384, max_size)
+
+ def test__volume_usage_per_disk_bytes_raid5(self, mock_get_system):
+ logical_disk = {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'controller': 'Smart Array P822 in Slot 3',
+ 'physical_disks': [
+ '35D38F11ACEF7BD3',
+ '3F5A8C54207B7233',
+ '32ADF365C6C1B7BD'
+ ],
+ 'is_root_volume': True
+ }
+ logical_disk['size_bytes'] = logical_disk['size_gb'] * units.Gi
+ del logical_disk['size_gb']
+ spans = redfish_raid._calculate_spans('5', 3)
+ usage_bytes = redfish_raid._volume_usage_per_disk_bytes(
+ logical_disk, self.physical_disks[0:3], spans_count=spans)
+ self.assertEqual(53687091200, usage_bytes)
+
+ def test__volume_usage_per_disk_bytes_raid10(self, mock_get_system):
+ logical_disk = {
+ 'size_gb': 50,
+ 'raid_level': '1+0',
+ 'controller': 'RAID.Integrated.1-1',
+ 'volume_name': 'root_volume',
+ 'is_root_volume': True,
+ 'physical_disks': [
+ '35D38F11ACEF7BD3',
+ '3F5A8C54207B7233',
+ '32ADF365C6C1B7BD',
+ '3D58ECBC375FD9F2'
+ ]
+ }
+ logical_disk['size_bytes'] = logical_disk['size_gb'] * units.Gi
+ del logical_disk['size_gb']
+ spans = redfish_raid._calculate_spans('1+0', 4)
+ usage_bytes = redfish_raid._volume_usage_per_disk_bytes(
+ logical_disk, self.physical_disks[0:4], spans_count=spans)
+ self.assertEqual(26843545600, usage_bytes)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_1a(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 'MAX',
+ 'raid_level': '5',
+ 'is_root_volume': True
+ }
+ ]
+ }
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue,
+ "'physical_disks' is missing from logical_disk while "
+ "'size_gb'='MAX' was requested",
+ task.driver.raid.create_configuration, task)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_1b(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'is_root_volume': True
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'StripedWithParity',
+ 'RAIDType': 'RAID5',
+ 'CapacityBytes': 107374182400,  # 100 GiB
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3}
+ ]
+ }
+ }
+ self.mock_storage.volumes.create.assert_called_once_with(
+ expected_payload, apply_time=None
+ )
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_1b_apply_time_immediate(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'is_root_volume': True
+ }
+ ]
+ }
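+ # The controller advertises immediate apply time, so the volume is
+ # created in place: no reboot and no task monitor polling expected.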
+ volumes = mock.MagicMock()
+ op_apply_time_support = mock.MagicMock()
+ op_apply_time_support.mapped_supported_values = [
+ sushy.APPLY_TIME_IMMEDIATE, sushy.APPLY_TIME_ON_RESET]
+ volumes.operation_apply_time_support = op_apply_time_support
+ self.mock_storage.volumes = volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ resource = mock.MagicMock(spec=['resource_name'])
+ resource.resource_name = 'volume'
+ volumes.create.return_value = resource
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'StripedWithParity',
+ 'RAIDType': 'RAID5',
+ 'CapacityBytes': 107374182400,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3}
+ ]
+ }
+ }
+ self.mock_storage.volumes.create.assert_called_once_with(
+ expected_payload, apply_time=sushy.APPLY_TIME_IMMEDIATE)
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=False, skip_current_step=True, polling=True)
+ self.assertEqual(mock_get_async_step_return_state.call_count, 0)
+ self.assertEqual(mock_node_power_action.call_count, 0)
+ self.assertEqual(mock_build_agent_options.call_count, 0)
+ self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_1b_apply_time_on_reset(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'is_root_volume': True
+ }
+ ]
+ }
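+ # The controller only supports applying changes on reset, so the step
+ # schedules the volume creation and reboots the node to apply it.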
+ volumes = mock.MagicMock()
+ op_apply_time_support = mock.MagicMock()
+ op_apply_time_support.mapped_supported_values = [
+ sushy.APPLY_TIME_ON_RESET]
+ volumes.operation_apply_time_support = op_apply_time_support
+ self.mock_storage.volumes = volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ task_mon = mock.MagicMock()
+ task_mon.task_monitor_uri = '/TaskService/123'
+ volumes.create.return_value = task_mon
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'StripedWithParity',
+ 'RAIDType': 'RAID5',
+ 'CapacityBytes': 107374182400,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3}
+ ]
+ }
+ }
+ self.mock_storage.volumes.create.assert_called_once_with(
+ expected_payload, apply_time=sushy.APPLY_TIME_ON_RESET)
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=True, skip_current_step=True, polling=True)
+ mock_get_async_step_return_state.assert_called_once_with(
+ task.node)
+ mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+ mock_build_agent_options.assert_called_once_with(task.node)
+ self.assertEqual(mock_prepare_ramdisk.call_count, 1)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_2(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ # TODO(bdodd): update mock_storage to allow this to pass w/o Exception
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'is_root_volume': True,
+ 'disk_type': 'ssd'
+ },
+ {
+ 'size_gb': 500,
+ 'raid_level': '1',
+ 'disk_type': 'hdd'
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaisesRegex(
+ exception.RedfishError,
+ 'failed to find matching physical disks for all logical disks',
+ task.driver.raid.create_configuration, task)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_3(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'controller': 'Smart Array P822 in Slot 3',
+ # 'physical_disks': ['6I:1:5', '6I:1:6', '6I:1:7'],
+ 'physical_disks': [
+ '35D38F11ACEF7BD3',
+ '3F5A8C54207B7233',
+ '32ADF365C6C1B7BD'
+ ],
+ 'is_root_volume': True
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'StripedWithParity',
+ 'RAIDType': 'RAID5',
+ 'CapacityBytes': 107374182400,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3}
+ ]
+ }
+ }
+ self.mock_storage.volumes.create.assert_called_once_with(
+ expected_payload, apply_time=None
+ )
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_4(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ # TODO(bdodd): update self.mock_storage to add more drives to satisfy
+ # both logical disks
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 50,
+ 'raid_level': '1+0',
+ 'controller': 'RAID.Integrated.1-1',
+ 'volume_name': 'root_volume',
+ 'is_root_volume': True,
+ # 'physical_disks': [
+ # 'Disk.Bay.0:Encl.Int.0-1:RAID.Integrated.1-1',
+ # 'Disk.Bay.1:Encl.Int.0-1:RAID.Integrated.1-1'
+ # ]
+ 'physical_disks': [
+ '35D38F11ACEF7BD3',
+ '3F5A8C54207B7233',
+ '32ADF365C6C1B7BD',
+ '3D58ECBC375FD9F2'
+ ]
+ },
+ {
+ 'size_gb': 100,
+ 'raid_level': '5',
+ 'controller': 'RAID.Integrated.1-1',
+ 'volume_name': 'data_volume',
+ # 'physical_disks': [
+ # 'Disk.Bay.2:Encl.Int.0-1:RAID.Integrated.1-1',
+ # 'Disk.Bay.3:Encl.Int.0-1:RAID.Integrated.1-1',
+ # 'Disk.Bay.4:Encl.Int.0-1:RAID.Integrated.1-1'
+ # ]
+ 'physical_disks': [
+ '3F5A8C54207B7233',
+ '32ADF365C6C1B7BD',
+ '3D58ECBC375FD9F2'
+ ]
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload1 = {
+ 'Encrypted': False,
+ 'VolumeType': 'SpannedMirrors',
+ 'RAIDType': 'RAID10',
+ 'CapacityBytes': 53687091200,  # 50 GiB
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3},
+ {'@odata.id': pre + self.drive_id4}
+ ]
+ }
+ }
+ expected_payload2 = {
+ 'Encrypted': False,
+ 'VolumeType': 'StripedWithParity',
+ 'RAIDType': 'RAID5',
+ 'CapacityBytes': 107374182400,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id2},
+ {'@odata.id': pre + self.drive_id3},
+ {'@odata.id': pre + self.drive_id4}
+ ]
+ }
+ }
+ self.assertEqual(
+ self.mock_storage.volumes.create.call_count, 2)
+ self.mock_storage.volumes.create.assert_any_call(
+ expected_payload1, apply_time=None
+ )
+ self.mock_storage.volumes.create.assert_any_call(
+ expected_payload2, apply_time=None
+ )
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_5a(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '1',
+ 'controller': 'software'
+ },
+ {
+ 'size_gb': 'MAX',
+ 'raid_level': '0',
+ 'controller': 'software'
+ }
+ ]
+ }
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue,
+ "'physical_disks' is missing from logical_disk while "
+ "'size_gb'='MAX' was requested",
+ task.driver.raid.create_configuration, task)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_5b(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 100,
+ 'raid_level': '1',
+ 'controller': 'software'
+ },
+ {
+ 'size_gb': 500,
+ 'raid_level': '0',
+ 'controller': 'software'
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload1 = {
+ 'Encrypted': False,
+ 'VolumeType': 'Mirrored',
+ 'RAIDType': 'RAID1',
+ 'CapacityBytes': 107374182400,
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id1},
+ {'@odata.id': pre + self.drive_id2}
+ ]
+ }
+ }
+ expected_payload2 = {
+ 'Encrypted': False,
+ 'VolumeType': 'NonRedundant',
+ 'RAIDType': 'RAID0',
+ 'CapacityBytes': 536870912000,  # 500 GiB
+ 'Links': {
+ 'Drives': [
+ {'@odata.id': pre + self.drive_id3}
+ ]
+ }
+ }
+ self.assertEqual(
+ self.mock_storage.volumes.create.call_count, 2)
+ self.mock_storage.volumes.create.assert_any_call(
+ expected_payload1, apply_time=None
+ )
+ self.mock_storage.volumes.create.assert_any_call(
+ expected_payload2, apply_time=None
+ )
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_create_config_case_6(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ target_raid_config = {
+ 'logical_disks': [
+ {
+ 'size_gb': 'MAX',
+ 'raid_level': '0',
+ 'controller': 'software',
+ 'physical_disks': [
+ {'size': '> 100'},
+ {'size': '> 100'}
+ ]
+ }
+ ]
+ }
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ self.node.target_raid_config = target_raid_config
+ self.node.save()
+ # TODO(bdodd): update when impl can handle disk size evaluation
+ # (see _calculate_volume_props())
+ """
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.create_configuration(task)
+ """
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_delete_config_immediate(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ mock_volumes = []
+ for i in ["1", "2"]:
+ mock_volumes.append(_mock_volume(
+ i, volume_type='Mirrored', raid_type='RAID1'))
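+ # Immediate apply is supported, so both volumes are deleted in place
+ # without rebooting the node.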
+ op_apply_time_support = mock.MagicMock()
+ op_apply_time_support.mapped_supported_values = [
+ sushy.APPLY_TIME_IMMEDIATE, sushy.APPLY_TIME_ON_RESET]
+ self.mock_storage.volumes.operation_apply_time_support = (
+ op_apply_time_support)
+ self.mock_storage.volumes.get_members.return_value = mock_volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.delete_configuration(task)
+ self.assertEqual(mock_volumes[0].delete.call_count, 1)
+ self.assertEqual(mock_volumes[1].delete.call_count, 1)
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=False, skip_current_step=True, polling=True)
+ self.assertEqual(mock_get_async_step_return_state.call_count, 0)
+ self.assertEqual(mock_node_power_action.call_count, 0)
+ self.assertEqual(mock_build_agent_options.call_count, 0)
+ self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot, 'prepare_ramdisk',
+ spec_set=True, autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(deploy_utils, 'get_async_step_return_state',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True)
+ def test_delete_config_on_reset(
+ self,
+ mock_set_async_step_flags,
+ mock_get_async_step_return_state,
+ mock_node_power_action,
+ mock_build_agent_options,
+ mock_prepare_ramdisk,
+ mock_get_system):
+ mock_volumes = []
+ for i in ["1", "2"]:
+ mock_volumes.append(_mock_volume(
+ i, volume_type='Mirrored', raid_type='RAID1'))
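+ # Only on-reset apply is supported, so each deletion returns a task
+ # monitor and the node is rebooted to apply the change.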
+ op_apply_time_support = mock.MagicMock()
+ op_apply_time_support.mapped_supported_values = [
+ sushy.APPLY_TIME_ON_RESET]
+ self.mock_storage.volumes.operation_apply_time_support = (
+ op_apply_time_support)
+ self.mock_storage.volumes.get_members.return_value = mock_volumes
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ task_mon = mock.MagicMock()
+ task_mon.task_monitor_uri = '/TaskService/123'
+ mock_volumes[0].delete.return_value = task_mon
+ mock_volumes[1].delete.return_value = task_mon
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.raid.delete_configuration(task)
+ self.assertEqual(mock_volumes[0].delete.call_count, 1)
+ self.assertEqual(mock_volumes[1].delete.call_count, 1)
+ mock_set_async_step_flags.assert_called_once_with(
+ task.node, reboot=True, skip_current_step=True, polling=True)
+ mock_get_async_step_return_state.assert_called_once_with(
+ task.node)
+ mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+ mock_build_agent_options.assert_called_once_with(task.node)
+ self.assertEqual(mock_prepare_ramdisk.call_count, 1)
+
+ def test_volume_create_error_handler(self, mock_get_system):
+ volume_collection = self.mock_storage.volumes
+ sushy_error = sushy.exceptions.SushyError()
+ volume_collection.create.side_effect = sushy_error
+ mock_get_system.return_value.storage.get_members.return_value = [
+ self.mock_storage]
+ mock_error_handler = mock.Mock()
+ drive_id = '35D38F11ACEF7BD3'
+ physical_disks = [drive_id]
+ capacity_bytes = 53739520000
+ pre = '/redfish/v1/Systems/1/Storage/1/Drives/'
+ expected_payload = {
+ 'Encrypted': False,
+ 'VolumeType': 'Mirrored',
+ 'RAIDType': 'RAID1',
+ 'CapacityBytes': capacity_bytes,
+ 'Links': {
+ 'Drives': [
+ {
+ '@odata.id': pre + drive_id
+ }
+ ]
+ }
+ }
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ redfish_raid.create_virtual_disk(
+ task, None, physical_disks, '1', capacity_bytes,
+ error_handler=mock_error_handler)
+ self.assertEqual(mock_error_handler.call_count, 1)
+ mock_error_handler.assert_called_once_with(
+ task, sushy_error, volume_collection, expected_payload
+ )
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
index f41ac4189..1ea699fd8 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
@@ -226,6 +226,25 @@ class RedfishUtilsTestCase(db_base.DbTestCase):
self.assertEqual(fake_conn.get_system.call_count,
redfish_utils.CONF.redfish.connection_attempts)
+ def test_get_task_monitor(self):
+ redfish_utils._get_connection = mock.Mock()
+ fake_monitor = mock.Mock()
+ redfish_utils._get_connection.return_value = fake_monitor
+ uri = '/redfish/v1/TaskMonitor/FAKEMONITOR'
+
+ response = redfish_utils.get_task_monitor(self.node, uri)
+
+ self.assertEqual(fake_monitor, response)
+
+ def test_get_task_monitor_error(self):
+ redfish_utils._get_connection = mock.Mock()
+ uri = '/redfish/v1/TaskMonitor/FAKEMONITOR'
+ redfish_utils._get_connection.side_effect = \
+ sushy.exceptions.ResourceNotFoundError('GET', uri, mock.Mock())
+
+ self.assertRaises(exception.RedfishError,
+ redfish_utils.get_task_monitor, self.node, uri)
+
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache._sessions', {})
diff --git a/ironic/tests/unit/drivers/modules/test_image_utils.py b/ironic/tests/unit/drivers/modules/test_image_utils.py
index 2ebb84c5e..c0be7147b 100644
--- a/ironic/tests/unit/drivers/modules/test_image_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_image_utils.py
@@ -30,6 +30,7 @@ from ironic.tests.unit.objects import utils as obj_utils
sushy = importutils.try_import('sushy')
INFO_DICT = db_utils.get_test_redfish_info()
+INFO_DICT_ILO = db_utils.get_test_ilo_info()
class RedfishImageHandlerTestCase(db_base.DbTestCase):
@@ -100,19 +101,15 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
mock_swift_api.delete_object.assert_called_once_with(
'ironic_redfish_container', object_name)
- @mock.patch.object(image_utils.ImageHandler, '_is_swift_enabled',
- autospec=True)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_local_link(
- self, mock_mkdir, mock_link, mock_shutil, mock_chmod,
- mock__is_swift):
- img_handler_obj = image_utils.ImageHandler(self.node.driver)
- mock__is_swift.return_value = False
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost', group='deploy')
+ img_handler_obj = image_utils.ImageHandler(self.node.driver)
url = img_handler_obj.publish_image('file.iso', 'boot.iso')
@@ -124,16 +121,12 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
'file.iso', '/httpboot/redfish/boot.iso')
mock_chmod.assert_called_once_with('file.iso', 0o644)
- @mock.patch.object(image_utils.ImageHandler, '_is_swift_enabled',
- autospec=True)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_local_copy(self, mock_mkdir, mock_link,
- mock_shutil, mock_chmod,
- mock__is_swift):
- mock__is_swift.return_value = False
+ mock_shutil, mock_chmod):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost', group='deploy')
img_handler_obj = image_utils.ImageHandler(self.node.driver)
@@ -152,12 +145,9 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
mock_chmod.assert_called_once_with('/httpboot/redfish/boot.iso',
0o644)
- @mock.patch.object(image_utils.ImageHandler, '_is_swift_enabled',
- autospec=True)
@mock.patch.object(image_utils, 'ironic_utils', autospec=True)
- def test_unpublish_image_local(self, mock_ironic_utils, mock__is_swift):
+ def test_unpublish_image_local(self, mock_ironic_utils):
self.config(use_swift=False, group='redfish')
- mock__is_swift.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
img_handler_obj = image_utils.ImageHandler(self.node.driver)
@@ -171,6 +161,50 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
expected_file)
+class IloImageHandlerTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(IloImageHandlerTestCase, self).setUp()
+ self.config(enabled_hardware_types=['ilo'],
+ enabled_power_interfaces=['ilo'],
+ enabled_boot_interfaces=['ilo-virtual-media'],
+ enabled_management_interfaces=['ilo'],
+ enabled_inspect_interfaces=['ilo'],
+ enabled_bios_interfaces=['ilo'])
+ self.node = obj_utils.create_test_node(
+ self.context, driver='ilo', driver_info=INFO_DICT_ILO)
+
+ def test_ilo_kernel_param_config(self):
+ self.config(kernel_append_params="console=ttyS1", group='ilo')
+ img_handler_obj = image_utils.ImageHandler(self.node.driver)
+ actual_k_param = img_handler_obj.kernel_params
+ expected_k_param = "console=ttyS1"
+
+ self.assertEqual(expected_k_param, actual_k_param)
+
+
+class Ilo5ImageHandlerTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(Ilo5ImageHandlerTestCase, self).setUp()
+ self.config(enabled_hardware_types=['ilo5'],
+ enabled_power_interfaces=['ilo'],
+ enabled_boot_interfaces=['ilo-virtual-media'],
+ enabled_management_interfaces=['ilo5'],
+ enabled_inspect_interfaces=['ilo'],
+ enabled_bios_interfaces=['ilo'])
+ self.node = obj_utils.create_test_node(
+ self.context, driver='ilo5', driver_info=INFO_DICT_ILO)
+
+ def test_ilo5_kernel_param_config(self):
+ self.config(kernel_append_params="console=ttyS1", group='ilo')
+ img_handler_obj = image_utils.ImageHandler(self.node.driver)
+ actual_k_param = img_handler_obj.kernel_params
+ expected_k_param = "console=ttyS1"
+
+ self.assertEqual(expected_k_param, actual_k_param)
+
+
class RedfishImageUtilsTestCase(db_base.DbTestCase):
def setUp(self):
diff --git a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
index 16ef663bd..2a4f08225 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
@@ -156,12 +156,14 @@ SUSHY_SPEC = (
'VIRTUAL_MEDIA_CD',
'VIRTUAL_MEDIA_FLOPPY',
'VIRTUAL_MEDIA_USBSTICK',
+ 'APPLY_TIME_IMMEDIATE',
'APPLY_TIME_ON_RESET',
'TASK_STATE_COMPLETED',
'HEALTH_OK',
'HEALTH_WARNING',
'SECURE_BOOT_RESET_KEYS_TO_DEFAULT',
'SECURE_BOOT_RESET_KEYS_DELETE_ALL',
+ 'VOLUME_TYPE_RAW_DEVICE'
)
SUSHY_AUTH_SPEC = (
diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py b/ironic/tests/unit/drivers/third_party_driver_mocks.py
index e00d51d75..ba337a517 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mocks.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py
@@ -218,12 +218,14 @@ if not sushy:
VIRTUAL_MEDIA_CD='cd',
VIRTUAL_MEDIA_FLOPPY='floppy',
VIRTUAL_MEDIA_USBSTICK='usb',
+ APPLY_TIME_IMMEDIATE='immediate',
APPLY_TIME_ON_RESET='on reset',
TASK_STATE_COMPLETED='completed',
HEALTH_OK='ok',
HEALTH_WARNING='warning',
SECURE_BOOT_RESET_KEYS_TO_DEFAULT="ResetAllKeysToDefault",
SECURE_BOOT_RESET_KEYS_DELETE_ALL="DeleteAllKeys",
+ VOLUME_TYPE_RAW_DEVICE='rawdevice'
)
sys.modules['sushy'] = sushy
diff --git a/ironic/tests/unit/objects/test_portgroup.py b/ironic/tests/unit/objects/test_portgroup.py
index 29bab20d0..9c0dc788c 100644
--- a/ironic/tests/unit/objects/test_portgroup.py
+++ b/ironic/tests/unit/objects/test_portgroup.py
@@ -58,7 +58,7 @@ class TestPortgroupObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
portgroup = objects.Portgroup.get(self.context, address)
- mock_get_portgroup.assert_called_once_with(address)
+ mock_get_portgroup.assert_called_once_with(address, project=None)
self.assertEqual(self.context, portgroup._context)
def test_get_by_name(self):
diff --git a/ironic/tests/unit/objects/test_volume_connector.py b/ironic/tests/unit/objects/test_volume_connector.py
index 7030f4766..380caf982 100644
--- a/ironic/tests/unit/objects/test_volume_connector.py
+++ b/ironic/tests/unit/objects/test_volume_connector.py
@@ -84,7 +84,8 @@ class TestVolumeConnectorObject(db_base.DbTestCase,
self.context, limit=4, sort_key='uuid', sort_dir='asc')
mock_get_list.assert_called_once_with(
- limit=4, marker=None, sort_key='uuid', sort_dir='asc')
+ limit=4, marker=None, sort_key='uuid', sort_dir='asc',
+ project=None)
self.assertThat(volume_connectors, HasLength(1))
self.assertIsInstance(volume_connectors[0],
objects.VolumeConnector)
@@ -98,7 +99,8 @@ class TestVolumeConnectorObject(db_base.DbTestCase,
self.context, limit=4, sort_key='uuid', sort_dir='asc')
mock_get_list.assert_called_once_with(
- limit=4, marker=None, sort_key='uuid', sort_dir='asc')
+ limit=4, marker=None, sort_key='uuid', sort_dir='asc',
+ project=None)
self.assertEqual([], volume_connectors)
def test_list_by_node_id(self):
@@ -111,7 +113,8 @@ class TestVolumeConnectorObject(db_base.DbTestCase,
self.context, node_id, limit=10, sort_dir='desc')
mock_get_list_by_node_id.assert_called_once_with(
- node_id, limit=10, marker=None, sort_key=None, sort_dir='desc')
+ node_id, limit=10, marker=None, sort_key=None, sort_dir='desc',
+ project=None)
self.assertThat(volume_connectors, HasLength(1))
self.assertIsInstance(volume_connectors[0],
objects.VolumeConnector)
diff --git a/ironic/tests/unit/objects/test_volume_target.py b/ironic/tests/unit/objects/test_volume_target.py
index 3882a368c..cb57e6b39 100644
--- a/ironic/tests/unit/objects/test_volume_target.py
+++ b/ironic/tests/unit/objects/test_volume_target.py
@@ -83,7 +83,8 @@ class TestVolumeTargetObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
self.context, limit=4, sort_key='uuid', sort_dir='asc')
mock_get_list.assert_called_once_with(
- limit=4, marker=None, sort_key='uuid', sort_dir='asc')
+ limit=4, marker=None, sort_key='uuid', sort_dir='asc',
+ project=None)
self.assertThat(volume_targets, HasLength(1))
self.assertIsInstance(volume_targets[0],
objects.VolumeTarget)
@@ -97,7 +98,8 @@ class TestVolumeTargetObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
self.context, limit=4, sort_key='uuid', sort_dir='asc')
mock_get_list.assert_called_once_with(
- limit=4, marker=None, sort_key='uuid', sort_dir='asc')
+ limit=4, marker=None, sort_key='uuid', sort_dir='asc',
+ project=None)
self.assertEqual([], volume_targets)
def test_list_by_node_id(self):
@@ -109,7 +111,8 @@ class TestVolumeTargetObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
self.context, node_id, limit=10, sort_dir='desc')
mock_get_list_by_node_id.assert_called_once_with(
- node_id, limit=10, marker=None, sort_key=None, sort_dir='desc')
+ node_id, limit=10, marker=None, sort_key=None, sort_dir='desc',
+ project=None)
self.assertThat(volume_targets, HasLength(1))
self.assertIsInstance(volume_targets[0], objects.VolumeTarget)
self.assertEqual(self.context, volume_targets[0]._context)
@@ -124,7 +127,7 @@ class TestVolumeTargetObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
mock_get_list_by_volume_id.assert_called_once_with(
volume_id, limit=10, marker=None,
- sort_key=None, sort_dir='desc')
+ sort_key=None, sort_dir='desc', project=None)
self.assertThat(volume_targets, HasLength(1))
self.assertIsInstance(volume_targets[0], objects.VolumeTarget)
self.assertEqual(self.context, volume_targets[0]._context)