summary | refs | log | tree | commit | diff
path: root/ironic
diff options
diff options: context | space | mode
Diffstat (limited to 'ironic')
-rw-r--r--ironic/api/controllers/v1/port.py6
-rw-r--r--ironic/api/controllers/v1/portgroup.py2
-rw-r--r--ironic/common/driver_factory.py36
-rw-r--r--ironic/common/images.py22
-rw-r--r--ironic/common/neutron.py3
-rw-r--r--ironic/conductor/base_manager.py24
-rw-r--r--ironic/conf/default.py4
-rw-r--r--ironic/db/sqlalchemy/__init__.py16
-rw-r--r--ironic/dhcp/neutron.py6
-rw-r--r--ironic/drivers/modules/agent.py10
-rw-r--r--ironic/drivers/modules/agent_base.py72
-rw-r--r--ironic/drivers/modules/agent_power.py9
-rw-r--r--ironic/drivers/modules/boot_mode_utils.py8
-rw-r--r--ironic/drivers/modules/deploy_utils.py81
-rw-r--r--ironic/drivers/modules/drac/bios.py43
-rw-r--r--ironic/drivers/modules/drac/management.py33
-rw-r--r--ironic/drivers/modules/drac/power.py4
-rw-r--r--ironic/drivers/modules/drac/raid.py80
-rw-r--r--ironic/drivers/modules/ilo/bios.py20
-rw-r--r--ironic/drivers/modules/ilo/boot.py28
-rw-r--r--ironic/drivers/modules/ilo/common.py8
-rw-r--r--ironic/drivers/modules/ilo/management.py22
-rw-r--r--ironic/drivers/modules/ilo/raid.py24
-rw-r--r--ironic/drivers/modules/inspect_utils.py7
-rw-r--r--ironic/drivers/modules/ipmitool.py20
-rw-r--r--ironic/drivers/modules/irmc/bios.py4
-rw-r--r--ironic/drivers/modules/irmc/boot.py20
-rw-r--r--ironic/drivers/modules/irmc/management.py11
-rw-r--r--ironic/drivers/modules/redfish/bios.py23
-rw-r--r--ironic/drivers/modules/redfish/firmware_utils.py58
-rw-r--r--ironic/drivers/modules/redfish/management.py16
-rw-r--r--ironic/drivers/modules/redfish/raid.py24
-rw-r--r--ironic/drivers/utils.py17
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_allocation.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_node.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_port.py15
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_portgroup.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_volume_connector.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_volume_target.py2
-rw-r--r--ironic/tests/unit/api/test_acl.py24
-rw-r--r--ironic/tests/unit/common/test_driver_factory.py45
-rw-r--r--ironic/tests/unit/common/test_images.py76
-rw-r--r--ironic/tests/unit/common/test_neutron.py5
-rw-r--r--ironic/tests/unit/conductor/test_base_manager.py21
-rw-r--r--ironic/tests/unit/conductor/test_manager.py46
-rw-r--r--ironic/tests/unit/db/test_node_history.py1
-rw-r--r--ironic/tests/unit/db/test_portgroups.py1
-rw-r--r--ironic/tests/unit/db/test_ports.py8
-rw-r--r--ironic/tests/unit/db/test_volume_connectors.py1
-rw-r--r--ironic/tests/unit/db/test_volume_targets.py4
-rw-r--r--ironic/tests/unit/dhcp/test_neutron.py6
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_noop.py3
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py88
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_management.py8
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspect_utils.py9
-rw-r--r--ironic/tests/unit/objects/test_volume_connector.py4
-rw-r--r--ironic/tests/unit/objects/test_volume_target.py4
57 files changed, 607 insertions, 533 deletions
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index eacbdf5db..0658fbf3f 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -407,10 +407,12 @@ class PortsController(rest.RestController):
and not uuidutils.is_uuid_like(node)):
raise exception.NotAcceptable()
+ resource_url = 'ports'
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
- sort_dir, fields=fields,
- detail=detail, project=project)
+ sort_dir, resource_url=resource_url,
+ fields=fields, detail=detail,
+ project=project)
@METRICS.timer('PortsController.detail')
@method.expose()
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index 9a2c2dc05..7900c4683 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -283,10 +283,12 @@ class PortgroupsController(pecan.rest.RestController):
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
+ resource_url = 'portgroups'
return self._get_portgroups_collection(node, address,
marker, limit,
sort_key, sort_dir,
fields=fields,
+ resource_url=resource_url,
detail=detail,
project=project)
diff --git a/ironic/common/driver_factory.py b/ironic/common/driver_factory.py
index 2bb4a57ee..3bea439ea 100644
--- a/ironic/common/driver_factory.py
+++ b/ironic/common/driver_factory.py
@@ -17,7 +17,7 @@ import collections
from oslo_concurrency import lockutils
from oslo_log import log
-from stevedore import named
+import stevedore
from ironic.common import exception
from ironic.common.i18n import _
@@ -404,7 +404,7 @@ class BaseDriverFactory(object):
cls._set_enabled_drivers()
cls._extension_manager = (
- named.NamedExtensionManager(
+ stevedore.NamedExtensionManager(
cls._entrypoint_name,
cls._enabled_driver_list,
invoke_on_load=True,
@@ -429,6 +429,34 @@ class BaseDriverFactory(object):
return ((ext.name, ext.obj) for ext in self._extension_manager)
+class InterfaceFactory(BaseDriverFactory):
+
+ # Name of a HardwareType attribute with a list of supported interfaces
+ _supported_driver_list_field = ''
+
+ @classmethod
+ def _set_enabled_drivers(cls):
+ super()._set_enabled_drivers()
+ if cls._enabled_driver_list:
+ return
+
+ tmp_ext_mgr = stevedore.ExtensionManager(
+ cls._entrypoint_name,
+ invoke_on_load=False, # do not create interfaces
+ on_load_failure_callback=cls._catch_driver_not_found)
+ cls_names = {v.plugin: k for (k, v) in tmp_ext_mgr.items()}
+
+ # Fallback: calculate based on hardware type defaults
+ for hw_type in hardware_types().values():
+ supported = getattr(hw_type, cls._supported_driver_list_field)[0]
+ try:
+ name = cls_names[supported]
+ except KeyError:
+ raise KeyError("%s not in %s" % (supported, cls_names))
+ if name not in cls._enabled_driver_list:
+ cls._enabled_driver_list.append(name)
+
+
def _warn_if_unsupported(ext):
if not ext.obj.supported:
LOG.warning('Driver "%s" is UNSUPPORTED. It has been deprecated '
@@ -443,10 +471,12 @@ class HardwareTypesFactory(BaseDriverFactory):
_INTERFACE_LOADERS = {
name: type('%sInterfaceFactory' % name.capitalize(),
- (BaseDriverFactory,),
+ (InterfaceFactory,),
{'_entrypoint_name': 'ironic.hardware.interfaces.%s' % name,
'_enabled_driver_list_config_option':
'enabled_%s_interfaces' % name,
+ '_supported_driver_list_field':
+ 'supported_%s_interfaces' % name,
'_logging_template':
"Loaded the following %s interfaces: %%s" % name})
for name in driver_base.ALL_INTERFACES
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 0f83b8726..cf51d723c 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -24,7 +24,6 @@ import shutil
import time
from ironic_lib import disk_utils
-from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
@@ -107,8 +106,9 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
mounting, creating filesystem, copying files, etc.
"""
try:
- ironic_utils.dd('/dev/zero', output_file, 'count=1',
- "bs=%dKiB" % fs_size_kib)
+ # TODO(sbaker): use ironic_lib.utils.dd when rootwrap has been removed
+ utils.execute('dd', 'if=/dev/zero', 'of=%s' % output_file, 'count=1',
+ 'bs=%dKiB' % fs_size_kib)
except processutils.ProcessExecutionError as e:
raise exception.ImageCreationFailed(image_type='vfat', error=e)
@@ -118,8 +118,9 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
# The label helps ramdisks to find the partition containing
# the parameters (by using /dev/disk/by-label/ir-vfd-dev).
# NOTE: FAT filesystem label can be up to 11 characters long.
- ironic_utils.mkfs('vfat', output_file, label="ir-vfd-dev")
- utils.mount(output_file, tmpdir, '-o', 'umask=0')
+ # TODO(sbaker): use ironic_lib.utils.mkfs when rootwrap has been
+ # removed
+ utils.execute('mkfs', '-t', 'vfat', '-n', 'ir-vfd-de', output_file)
except processutils.ProcessExecutionError as e:
raise exception.ImageCreationFailed(image_type='vfat', error=e)
@@ -134,16 +135,15 @@ def create_vfat_image(output_file, files_info=None, parameters=None,
file_contents = '\n'.join(params_list)
utils.write_to_file(parameters_file, file_contents)
+ # use mtools to copy the files into the image in a single
+ # operation
+ utils.execute('mcopy', '-s', '%s/*' % tmpdir,
+ '-i', output_file, '::')
+
except Exception as e:
LOG.exception("vfat image creation failed. Error: %s", e)
raise exception.ImageCreationFailed(image_type='vfat', error=e)
- finally:
- try:
- utils.umount(tmpdir)
- except processutils.ProcessExecutionError as e:
- raise exception.ImageCreationFailed(image_type='vfat', error=e)
-
def _generate_cfg(kernel_params, template, options):
"""Generates a isolinux or grub configuration file.
diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py
index 3ab23424b..df5b5bd7a 100644
--- a/ironic/common/neutron.py
+++ b/ironic/common/neutron.py
@@ -345,7 +345,8 @@ def add_ports_to_network(task, network_uuid, security_groups=None):
wait_for_host_agent(
client, update_port_attrs['binding:host_id'])
port = client.create_port(**port_attrs)
- update_neutron_port(task.context, port.id, update_port_attrs)
+ port = update_neutron_port(task.context, port.id,
+ update_port_attrs)
if CONF.neutron.dhcpv6_stateful_address_count > 1:
_add_ip_addresses_for_ipv6_stateful(task.context, port, client)
if is_smart_nic:
diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py
index d53e6af1e..aa684408f 100644
--- a/ironic/conductor/base_manager.py
+++ b/ironic/conductor/base_manager.py
@@ -40,7 +40,6 @@ from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.conf import CONF
from ironic.db import api as dbapi
-from ironic.drivers import base as driver_base
from ironic.drivers.modules import deploy_utils
from ironic import objects
from ironic.objects import fields as obj_fields
@@ -49,27 +48,6 @@ from ironic.objects import fields as obj_fields
LOG = log.getLogger(__name__)
-def _check_enabled_interfaces():
- """Sanity-check enabled_*_interfaces configs.
-
- We do this before we even bother to try to load up drivers. If we have any
- dynamic drivers enabled, then we need interfaces enabled as well.
-
- :raises: ConfigInvalid if an enabled interfaces config option is empty.
- """
- empty_confs = []
- iface_types = ['enabled_%s_interfaces' % i
- for i in driver_base.ALL_INTERFACES]
- for iface_type in iface_types:
- conf_value = getattr(CONF, iface_type)
- if not conf_value:
- empty_confs.append(iface_type)
- if empty_confs:
- msg = (_('Configuration options %s cannot be an empty list.') %
- ', '.join(empty_confs))
- raise exception.ConfigInvalid(error_msg=msg)
-
-
class BaseConductorManager(object):
def __init__(self, host, topic):
@@ -146,8 +124,6 @@ class BaseConductorManager(object):
use_groups=self._use_groups())
"""Consistent hash ring which maps drivers to conductors."""
- _check_enabled_interfaces()
-
# NOTE(tenbrae): these calls may raise DriverLoadError or
# DriverNotFound
# NOTE(vdrok): Instantiate network and storage interface factory on
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index 3a6d3721d..66555d146 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -121,7 +121,7 @@ driver_opts = [
cfg.StrOpt('default_inspect_interface',
help=_DEFAULT_IFACE_HELP.format('inspect')),
cfg.ListOpt('enabled_management_interfaces',
- default=['ipmitool', 'redfish'],
+ default=None, # automatically calculate
help=_ENABLED_IFACE_HELP.format('management')),
cfg.StrOpt('default_management_interface',
help=_DEFAULT_IFACE_HELP.format('management')),
@@ -131,7 +131,7 @@ driver_opts = [
cfg.StrOpt('default_network_interface',
help=_DEFAULT_IFACE_HELP.format('network')),
cfg.ListOpt('enabled_power_interfaces',
- default=['ipmitool', 'redfish'],
+ default=None, # automatically calculate
help=_ENABLED_IFACE_HELP.format('power')),
cfg.StrOpt('default_power_interface',
help=_DEFAULT_IFACE_HELP.format('power')),
diff --git a/ironic/db/sqlalchemy/__init__.py b/ironic/db/sqlalchemy/__init__.py
index e69de29bb..0f792361a 100644
--- a/ironic/db/sqlalchemy/__init__.py
+++ b/ironic/db/sqlalchemy/__init__.py
@@ -0,0 +1,16 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db.sqlalchemy import enginefacade
+
+# NOTE(dtantsur): we want sqlite as close to a real database as possible.
+enginefacade.configure(sqlite_fk=True)
diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py
index 18ccae05c..a5cb09282 100644
--- a/ironic/dhcp/neutron.py
+++ b/ironic/dhcp/neutron.py
@@ -80,9 +80,9 @@ class NeutronDHCPApi(base.BaseDHCP):
update_opts = []
if len(fips) != 0:
- for fip in fips:
- ip_version = \
- ipaddress.ip_address(fip['ip_address']).version
+ ip_versions = {ipaddress.ip_address(fip['ip_address']).version
+ for fip in fips}
+ for ip_version in ip_versions:
for option in dhcp_options:
if option.get('ip_version', 4) == ip_version:
update_opts.append(option)
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index 441d8c791..f309a1011 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -269,9 +269,7 @@ class CustomAgentDeploy(agent_base.AgentBaseMixin, agent_base.AgentDeployMixin,
# deploy step.
if not task.node.driver_internal_info.get('deployment_reboot'):
manager_utils.node_power_action(task, states.REBOOT)
- info = task.node.driver_internal_info
- info.pop('deployment_reboot', None)
- task.node.driver_internal_info = info
+ task.node.del_driver_internal_info('deployment_reboot')
task.node.save()
return states.DEPLOYWAIT
@@ -600,15 +598,13 @@ class AgentDeploy(CustomAgentDeploy):
# NOTE(mjturek): In the case of local boot using a partition image on
# ppc64* hardware we need to provide the 'PReP_Boot_partition_uuid' to
# direct where the bootloader should be installed.
- driver_internal_info = task.node.driver_internal_info
client = agent_client.get_client(task)
partition_uuids = client.get_partition_uuids(node).get(
'command_result') or {}
root_uuid = partition_uuids.get('root uuid')
if root_uuid:
- driver_internal_info['root_uuid_or_disk_id'] = root_uuid
- task.node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('root_uuid_or_disk_id', root_uuid)
task.node.save()
elif not iwdi:
LOG.error('No root UUID returned from the ramdisk for node '
@@ -738,7 +734,7 @@ class AgentRAID(base.RAIDInterface):
create_nonroot_volumes=create_nonroot_volumes)
# Rewrite it back to the node object, but no need to save it as
# we need to just send this to the agent ramdisk.
- node.driver_internal_info['target_raid_config'] = target_raid_config
+ node.set_driver_internal_info('target_raid_config', target_raid_config)
LOG.debug("Calling agent RAID create_configuration for node %(node)s "
"with the following target RAID configuration: %(target)s",
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index 8f5a4412a..8f480aca7 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -21,7 +21,6 @@ import collections
from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import strutils
-from oslo_utils import timeutils
import tenacity
from ironic.common import boot_devices
@@ -209,15 +208,15 @@ def _post_step_reboot(task, step_type):
return
# Signify that we've rebooted
- driver_internal_info = task.node.driver_internal_info
- field = ('cleaning_reboot' if step_type == 'clean'
- else 'deployment_reboot')
- driver_internal_info[field] = True
- if not driver_internal_info.get('agent_secret_token_pregenerated', False):
+ if step_type == 'clean':
+ task.node.set_driver_internal_info('cleaning_reboot', True)
+ else:
+ task.node.set_driver_internal_info('deployment_reboot', True)
+ if not task.node.driver_internal_info.get(
+ 'agent_secret_token_pregenerated', False):
# Wipes out the existing recorded token because the machine will
# need to re-establish the token.
- driver_internal_info.pop('agent_secret_token', None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info('agent_secret_token')
task.node.save()
@@ -591,22 +590,17 @@ class HeartbeatMixin(object):
node = task.node
LOG.debug('Heartbeat from node %s in state %s (target state %s)',
node.uuid, node.provision_state, node.target_provision_state)
- driver_internal_info = node.driver_internal_info
- driver_internal_info['agent_url'] = callback_url
- driver_internal_info['agent_version'] = agent_version
- # Record the last heartbeat event time in UTC, so we can make
- # decisions about it later. Can be decoded to datetime object with:
- # datetime.datetime.strptime(var, "%Y-%m-%d %H:%M:%S.%f")
- driver_internal_info['agent_last_heartbeat'] = str(
- timeutils.utcnow().isoformat())
+ node.set_driver_internal_info('agent_url', callback_url)
+ node.set_driver_internal_info('agent_version', agent_version)
+ # Record the last heartbeat event time
+ node.timestamp_driver_internal_info('agent_last_heartbeat')
if agent_verify_ca:
- driver_internal_info['agent_verify_ca'] = agent_verify_ca
+ node.set_driver_internal_info('agent_verify_ca', agent_verify_ca)
if agent_status:
- driver_internal_info['agent_status'] = agent_status
+ node.set_driver_internal_info('agent_status', agent_status)
if agent_status_message:
- driver_internal_info['agent_status_message'] = \
- agent_status_message
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('agent_status_message',
+ agent_status_message)
node.save()
if node.provision_state in _HEARTBEAT_RECORD_ONLY:
@@ -840,13 +834,12 @@ class AgentBaseMixin(object):
steps[step['interface']].append(step)
# Save hardware manager version, steps, and date
- info = node.driver_internal_info
- info['hardware_manager_version'] = agent_result[
- 'hardware_manager_version']
- info['agent_cached_%s_steps' % step_type] = dict(steps)
- info['agent_cached_%s_steps_refreshed' % step_type] = str(
- timeutils.utcnow())
- node.driver_internal_info = info
+ node.set_driver_internal_info('hardware_manager_version',
+ agent_result['hardware_manager_version'])
+ node.set_driver_internal_info('agent_cached_%s_steps' % step_type,
+ dict(steps))
+ node.timestamp_driver_internal_info(
+ 'agent_cached_%s_steps_refreshed' % step_type)
node.save()
LOG.debug('Refreshed agent %(type)s step cache for node %(node)s: '
'%(steps)s', {'node': node.uuid, 'steps': steps,
@@ -896,9 +889,7 @@ class AgentBaseMixin(object):
'continuing from current step %(step)s.',
{'node': node.uuid, 'step': node.clean_step})
- driver_internal_info = node.driver_internal_info
- driver_internal_info['skip_current_clean_step'] = False
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('skip_current_clean_step', False)
node.save()
else:
# Restart the process, agent must have rebooted to new version
@@ -1335,20 +1326,11 @@ class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
software_raid=software_raid
)
if result['command_status'] == 'FAILED':
- if not whole_disk_image:
- msg = (_("Failed to install a bootloader when "
- "deploying node %(node)s. Error: %(error)s") %
- {'node': node.uuid,
- 'error': agent_client.get_command_error(result)})
- log_and_raise_deployment_error(task, msg)
- else:
- # Its possible the install will fail if the IPA image
- # has not been updated, log this and continue
- LOG.info('Could not install bootloader for whole disk '
- 'image for node %(node)s, Error: %(error)s"',
- {'node': node.uuid,
- 'error': agent_client.get_command_error(result)})
- return
+ msg = (_("Failed to install a bootloader when "
+ "deploying node %(node)s. Error: %(error)s") %
+ {'node': node.uuid,
+ 'error': agent_client.get_command_error(result)})
+ log_and_raise_deployment_error(task, msg)
try:
persistent = True
diff --git a/ironic/drivers/modules/agent_power.py b/ironic/drivers/modules/agent_power.py
index f6ffba58a..bbaa0cdaa 100644
--- a/ironic/drivers/modules/agent_power.py
+++ b/ironic/drivers/modules/agent_power.py
@@ -140,16 +140,15 @@ class AgentPower(base.PowerInterface):
self._client.reboot(node)
- info = node.driver_internal_info
# NOTE(dtantsur): wipe the agent token, otherwise the rebooted agent
# won't be able to heartbeat. This is mostly a precaution since the
# calling code in conductor is expected to handle it.
- if not info.get('agent_secret_token_pregenerated'):
- info.pop('agent_secret_token', None)
+ if not node.driver_internal_info.get(
+ 'agent_secret_token_pregenerated'):
+ node.del_driver_internal_info('agent_secret_token')
# NOTE(dtantsur): the URL may change on reboot, wipe it as well (but
# only after we call reboot).
- info.pop('agent_url', None)
- node.driver_internal_info = info
+ node.del_driver_internal_info('agent_url')
node.save()
LOG.debug('Requested reboot of node %(node)s via the agent, waiting '
diff --git a/ironic/drivers/modules/boot_mode_utils.py b/ironic/drivers/modules/boot_mode_utils.py
index c6a08d913..737aca24c 100644
--- a/ironic/drivers/modules/boot_mode_utils.py
+++ b/ironic/drivers/modules/boot_mode_utils.py
@@ -110,10 +110,8 @@ def sync_boot_mode(task):
# the new boot mode may be set in 'driver_internal_info/deploy_boot_mode'
if not ironic_boot_mode and not bm_boot_mode:
- driver_internal_info = node.driver_internal_info
default_boot_mode = CONF.deploy.default_boot_mode
- driver_internal_info['deploy_boot_mode'] = default_boot_mode
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('deploy_boot_mode', default_boot_mode)
node.save()
LOG.debug("Ironic node %(uuid)s boot mode will be set to default "
@@ -123,9 +121,7 @@ def sync_boot_mode(task):
_set_boot_mode_on_bm(task, default_boot_mode)
elif not ironic_boot_mode and bm_boot_mode:
- driver_internal_info = node.driver_internal_info
- driver_internal_info['deploy_boot_mode'] = bm_boot_mode
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('deploy_boot_mode', bm_boot_mode)
node.save()
LOG.debug("Ironic node %(uuid)s boot mode is set to boot mode "
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index a22b4ff2d..f46250e2a 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -306,30 +306,36 @@ def agent_add_clean_params(task):
:param task: a TaskManager instance.
"""
- info = task.node.driver_internal_info
random_iterations = CONF.deploy.shred_random_overwrite_iterations
- info['agent_erase_devices_iterations'] = random_iterations
+ node = task.node
+ node.set_driver_internal_info('agent_erase_devices_iterations',
+ random_iterations)
zeroize = CONF.deploy.shred_final_overwrite_with_zeros
- info['agent_erase_devices_zeroize'] = zeroize
+ node.set_driver_internal_info('agent_erase_devices_zeroize', zeroize)
erase_fallback = CONF.deploy.continue_if_disk_secure_erase_fails
- info['agent_continue_if_secure_erase_failed'] = erase_fallback
+ node.set_driver_internal_info('agent_continue_if_secure_erase_failed',
+ erase_fallback)
# NOTE(janders) ``agent_continue_if_ata_erase_failed`` is deprecated and
# will be removed in the "Y" cycle. The replacement option
# ``agent_continue_if_secure_erase_failed`` is used to control shred
# fallback for both ATA Secure Erase and NVMe Secure Erase.
# The ``agent_continue_if_ata_erase_failed`` line can
# be deleted along with this comment when support for it is fully removed.
- info['agent_continue_if_ata_erase_failed'] = erase_fallback
+ node.set_driver_internal_info('agent_continue_if_ata_erase_failed',
+ erase_fallback)
nvme_secure_erase = CONF.deploy.enable_nvme_secure_erase
- info['agent_enable_nvme_secure_erase'] = nvme_secure_erase
+ node.set_driver_internal_info('agent_enable_nvme_secure_erase',
+ nvme_secure_erase)
secure_erase = CONF.deploy.enable_ata_secure_erase
- info['agent_enable_ata_secure_erase'] = secure_erase
- info['disk_erasure_concurrency'] = CONF.deploy.disk_erasure_concurrency
- info['agent_erase_skip_read_only'] = CONF.deploy.erase_skip_read_only
+ node.set_driver_internal_info('agent_enable_ata_secure_erase',
+ secure_erase)
+ node.set_driver_internal_info('disk_erasure_concurrency',
+ CONF.deploy.disk_erasure_concurrency)
+ node.set_driver_internal_info('agent_erase_skip_read_only',
+ CONF.deploy.erase_skip_read_only)
- task.node.driver_internal_info = info
- task.node.save()
+ node.save()
def try_set_boot_device(task, device, persistent=True):
@@ -922,13 +928,12 @@ def _check_disk_layout_unchanged(node, i_info):
"""
# If a node has been deployed to, this is the instance information
# used for that deployment.
- driver_internal_info = node.driver_internal_info
- if 'instance' not in driver_internal_info:
+ if 'instance' not in node.driver_internal_info:
return
error_msg = ''
for param in DISK_LAYOUT_PARAMS:
- param_value = int(driver_internal_info['instance'][param])
+ param_value = int(node.driver_internal_info['instance'][param])
if param_value != int(i_info[param]):
error_msg += (_(' Deployed value of %(param)s was %(param_value)s '
'but requested value is %(request_value)s.') %
@@ -1271,19 +1276,17 @@ def populate_storage_driver_internal_info(task):
boot_capability = ("%s_volume_boot" % vol_type)
deploy_capability = ("%s_volume_deploy" % vol_type)
vol_uuid = boot_volume['uuid']
- driver_internal_info = node.driver_internal_info
if check_interface_capability(task.driver.boot, boot_capability):
- driver_internal_info['boot_from_volume'] = vol_uuid
+ node.set_driver_internal_info('boot_from_volume', vol_uuid)
# NOTE(TheJulia): This would be a convenient place to check
# if we need to know about deploying the volume.
if (check_interface_capability(task.driver.deploy, deploy_capability)
and task.driver.storage.should_write_image(task)):
- driver_internal_info['boot_from_volume_deploy'] = vol_uuid
+ node.set_driver_internal_info('boot_from_volume_deploy', vol_uuid)
# NOTE(TheJulia): This is also a useful place to include a
# root device hint since we should/might/be able to obtain
# and supply that information to IPA if it needs to write
# the image to the volume.
- node.driver_internal_info = driver_internal_info
node.save()
@@ -1305,10 +1308,8 @@ def tear_down_storage_configuration(task):
{'target': volume.uuid, 'node': task.node.uuid})
node = task.node
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('boot_from_volume', None)
- driver_internal_info.pop('boot_from_volume_deploy', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('boot_from_volume')
+ node.del_driver_internal_info('boot_from_volume_deploy')
node.save()
@@ -1346,7 +1347,7 @@ def get_async_step_return_state(node):
return states.CLEANWAIT if node.clean_step else states.DEPLOYWAIT
-def _check_agent_token_prior_to_agent_reboot(driver_internal_info):
+def _check_agent_token_prior_to_agent_reboot(node):
"""Removes the agent token if it was not pregenerated.
Removal of the agent token in cases where it is not pregenerated
@@ -1357,11 +1358,11 @@ def _check_agent_token_prior_to_agent_reboot(driver_internal_info):
already included in the payload and must be generated again
upon lookup.
- :param driver_internal_info: The driver_interal_info dict object
- from a Node object.
+ :param node: The Node object.
"""
- if not driver_internal_info.get('agent_secret_token_pregenerated', False):
- driver_internal_info.pop('agent_secret_token', None)
+ if not node.driver_internal_info.get('agent_secret_token_pregenerated',
+ False):
+ node.del_driver_internal_info('agent_secret_token')
def set_async_step_flags(node, reboot=None, skip_current_step=None,
@@ -1383,25 +1384,25 @@ def set_async_step_flags(node, reboot=None, skip_current_step=None,
corresponding polling flag is not set in the node's
driver_internal_info.
"""
- info = node.driver_internal_info
- cleaning = {'reboot': 'cleaning_reboot',
- 'skip': 'skip_current_clean_step',
- 'polling': 'cleaning_polling'}
- deployment = {'reboot': 'deployment_reboot',
- 'skip': 'skip_current_deploy_step',
- 'polling': 'deployment_polling'}
- fields = cleaning if node.clean_step else deployment
+ if node.clean_step:
+ reboot_field = 'cleaning_reboot'
+ skip_field = 'skip_current_clean_step'
+ polling_field = 'cleaning_polling'
+ else:
+ reboot_field = 'deployment_reboot'
+ skip_field = 'skip_current_deploy_step'
+ polling_field = 'deployment_polling'
+
if reboot is not None:
- info[fields['reboot']] = reboot
+ node.set_driver_internal_info(reboot_field, reboot)
if reboot:
# If rebooting, we must ensure that we check and remove
# an agent token if necessary.
- _check_agent_token_prior_to_agent_reboot(info)
+ _check_agent_token_prior_to_agent_reboot(node)
if skip_current_step is not None:
- info[fields['skip']] = skip_current_step
+ node.set_driver_internal_info(skip_field, skip_current_step)
if polling is not None:
- info[fields['polling']] = polling
- node.driver_internal_info = info
+ node.set_driver_internal_info(polling_field, polling)
node.save()
diff --git a/ironic/drivers/modules/drac/bios.py b/ironic/drivers/modules/drac/bios.py
index 8e36ef4c7..8ea3ff51f 100644
--- a/ironic/drivers/modules/drac/bios.py
+++ b/ironic/drivers/modules/drac/bios.py
@@ -128,13 +128,14 @@ class DracWSManBIOS(base.BIOSInterface):
raise exception.DracOperationError(error=exc)
# Store JobID for the async job handler _check_node_bios_jobs
- driver_internal_info = node.driver_internal_info
- driver_internal_info.setdefault(
- 'bios_config_job_ids', []).append(commit_result)
- node.driver_internal_info = driver_internal_info
-
- # This method calls node.save(), bios_config_job_ids will be saved
- # automatically
+ bios_config_job_ids = node.driver_internal_info.get(
+ 'bios_config_job_ids', [])
+ bios_config_job_ids.append(commit_result)
+ node.set_driver_internal_info('bios_config_job_ids',
+ bios_config_job_ids)
+
+ # This method calls node.save(), bios_config_job_ids will then be
+ # saved.
# These flags are for the conductor to manage the asynchronous
# jobs that have been initiated by this method
deploy_utils.set_async_step_flags(
@@ -300,15 +301,15 @@ class DracWSManBIOS(base.BIOSInterface):
"""
if finished_job_ids is None:
finished_job_ids = []
- driver_internal_info = node.driver_internal_info
# take out the unfinished job ids from all the jobs
- unfinished_job_ids = [job_id for job_id
- in driver_internal_info['bios_config_job_ids']
- if job_id not in finished_job_ids]
+ unfinished_job_ids = [
+ job_id for job_id
+ in node.driver_internal_info['bios_config_job_ids']
+ if job_id not in finished_job_ids]
# assign the unfinished job ids back to the total list
# this will clear the finished jobs from the list
- driver_internal_info['bios_config_job_ids'] = unfinished_job_ids
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('bios_config_job_ids',
+ unfinished_job_ids)
node.save()
def _delete_cached_reboot_time(self, node):
@@ -316,12 +317,9 @@ class DracWSManBIOS(base.BIOSInterface):
:param node: an ironic node object
"""
- driver_internal_info = node.driver_internal_info
# Remove the last reboot time and factory reset time
- driver_internal_info.pop(
- 'factory_reset_time_before_reboot')
- driver_internal_info.pop('factory_reset_time')
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('factory_reset_time_before_reboot')
+ node.del_driver_internal_info('factory_reset_time')
node.save()
def _set_failed(self, task, error_message):
@@ -414,14 +412,11 @@ class DracWSManBIOS(base.BIOSInterface):
raise exception.DracOperationError(error=exc)
# Store the last inventory time on reboot for async job handler
# _check_last_system_inventory_changed
- driver_internal_info = node.driver_internal_info
- driver_internal_info['factory_reset_time_before_reboot'] = \
- factory_reset_time_before_reboot
+ node.set_driver_internal_info('factory_reset_time_before_reboot',
+ factory_reset_time_before_reboot)
# Store the current time to later check if factory reset times out
- driver_internal_info['factory_reset_time'] = str(
- timeutils.utcnow(with_timezone=True))
+ node.timestamp_driver_internal_info('factory_reset_time')
- node.driver_internal_info = driver_internal_info
# rebooting the server to apply factory reset value
client.set_power_state('REBOOT')
diff --git a/ironic/drivers/modules/drac/management.py b/ironic/drivers/modules/drac/management.py
index d329a419c..f18f5e29e 100644
--- a/ironic/drivers/modules/drac/management.py
+++ b/ironic/drivers/modules/drac/management.py
@@ -445,9 +445,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
lambda m: m.import_system_configuration(
json.dumps(configuration["oem"]["data"])),)
- info = task.node.driver_internal_info
- info['import_task_monitor_url'] = task_monitor.task_monitor_uri
- task.node.driver_internal_info = info
+ task.node.set_driver_internal_info('import_task_monitor_url',
+ task_monitor.task_monitor_uri)
deploy_utils.set_async_step_flags(
task.node,
@@ -476,9 +475,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
"""
# Import is async operation, setting sub-step to store export config
# and indicate that it's being executed as part of composite step
- info = task.node.driver_internal_info
- info['export_configuration_location'] = export_configuration_location
- task.node.driver_internal_info = info
+ task.node.set_driver_internal_info('export_configuration_location',
+ export_configuration_location)
task.node.save()
return self.import_configuration(task, import_configuration_location)
@@ -521,9 +519,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
log_msg = ("Import configuration task failed for node "
"%(node)s. %(error)s" % {'node': task.node.uuid,
'error': error_msg})
- info = node.driver_internal_info
- info.pop('import_task_monitor_url', None)
- node.driver_internal_info = info
+ node.del_driver_internal_info('import_task_monitor_url')
node.save()
self._set_failed(task, log_msg, error_msg)
return
@@ -532,9 +528,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
import_task = task_monitor.get_task()
task.upgrade_lock()
- info = node.driver_internal_info
- info.pop('import_task_monitor_url', None)
- node.driver_internal_info = info
+ node.del_driver_internal_info('import_task_monitor_url')
succeeded = False
if (import_task.task_state == sushy.TASK_STATE_COMPLETED
@@ -557,8 +551,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
'task_monitor_url': task_monitor_url})
# If import executed as part of import_export_configuration
- export_configuration_location =\
- info.get('export_configuration_location')
+ export_configuration_location = node.driver_internal_info.get(
+ 'export_configuration_location')
if export_configuration_location:
# then do sync export configuration before finishing
self._cleanup_export_substep(node)
@@ -613,9 +607,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
manager_utils.deploying_error_handler(task, log_msg, error_msg)
def _cleanup_export_substep(self, node):
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('export_configuration_location', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('export_configuration_location')
@METRICS.timer('DracRedfishManagement.clear_job_queue')
@base.verify_step(priority=0)
@@ -752,10 +744,9 @@ class DracWSManManagement(base.ManagementInterface):
# at the next boot. As a workaround, saving it to
# driver_internal_info and committing the change during
# power state change.
- driver_internal_info = node.driver_internal_info
- driver_internal_info['drac_boot_device'] = {'boot_device': device,
- 'persistent': persistent}
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('drac_boot_device',
+ {'boot_device': device,
+ 'persistent': persistent})
node.save()
@METRICS.timer('DracManagement.get_sensors_data')
diff --git a/ironic/drivers/modules/drac/power.py b/ironic/drivers/modules/drac/power.py
index 468cc64b6..c25ddfef3 100644
--- a/ironic/drivers/modules/drac/power.py
+++ b/ironic/drivers/modules/drac/power.py
@@ -74,7 +74,6 @@ def _get_power_state(node):
def _commit_boot_list_change(node):
- driver_internal_info = node.driver_internal_info
boot_device = node.driver_internal_info.get('drac_boot_device')
if boot_device is None:
@@ -83,8 +82,7 @@ def _commit_boot_list_change(node):
drac_management.set_boot_device(node, boot_device['boot_device'],
boot_device['persistent'])
- driver_internal_info['drac_boot_device'] = None
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('drac_boot_device', None)
node.save()
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index a85e4d372..d4e633f9c 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -1007,19 +1007,14 @@ def _commit_to_controllers(node, controllers, substep="completed"):
if not controllers:
LOG.debug('No changes on any of the controllers on node %s',
node.uuid)
- driver_internal_info = node.driver_internal_info
- driver_internal_info['raid_config_substep'] = substep
- driver_internal_info['raid_config_parameters'] = []
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('raid_config_substep', substep)
+ node.set_driver_internal_info('raid_config_parameters', [])
node.save()
return
- driver_internal_info = node.driver_internal_info
- driver_internal_info['raid_config_substep'] = substep
- driver_internal_info['raid_config_parameters'] = []
-
- if 'raid_config_job_ids' not in driver_internal_info:
- driver_internal_info['raid_config_job_ids'] = []
+ i_raid_config_parameters = []
+ i_raid_config_job_ids = node.driver_internal_info.get(
+ 'raid_config_job_ids', [])
optional = drac_constants.RebootRequired.optional
@@ -1083,13 +1078,12 @@ def _commit_to_controllers(node, controllers, substep="completed"):
raid_config_job_ids=raid_config_job_ids,
raid_config_parameters=raid_config_parameters)
- driver_internal_info['raid_config_job_ids'].extend(job_details[
- 'raid_config_job_ids'])
-
- driver_internal_info['raid_config_parameters'].extend(job_details[
- 'raid_config_parameters'])
-
- node.driver_internal_info = driver_internal_info
+ i_raid_config_job_ids.extend(job_details['raid_config_job_ids'])
+ i_raid_config_parameters.extend(job_details['raid_config_parameters'])
+ node.set_driver_internal_info('raid_config_substep', substep)
+ node.set_driver_internal_info('raid_config_parameters',
+ i_raid_config_parameters)
+ node.set_driver_internal_info('raid_config_job_ids', i_raid_config_job_ids)
# Signal whether the node has been rebooted, that we do not need to execute
# the step again, and that this completion of this step is triggered
@@ -1472,10 +1466,9 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
deploy_utils.prepare_agent_boot(task)
# Reboot already done by non real time task
task.upgrade_lock()
- info = task.node.driver_internal_info
- info['raid_task_monitor_uris'] = [
- tm.task_monitor_uri for tm in task_mons]
- task.node.driver_internal_info = info
+ task.node.set_driver_internal_info(
+ 'raid_task_monitor_uris',
+ [tm.task_monitor_uri for tm in task_mons])
task.node.save()
return True
@@ -1526,27 +1519,25 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
'message': ', '.join(messages)}))
task.upgrade_lock()
- info = node.driver_internal_info
if failed_msgs:
error_msg = (_("Failed RAID configuration tasks: %(messages)s")
% {'messages': ', '.join(failed_msgs)})
log_msg = ("RAID configuration task failed for node "
"%(node)s. %(error)s" % {'node': node.uuid,
'error': error_msg})
- info.pop('raid_task_monitor_uris', None)
+ node.del_driver_internal_info('raid_task_monitor_uris')
self._set_failed(task, log_msg, error_msg)
else:
running_task_mon_uris = [x for x in task_mon_uris
if x not in completed_task_mon_uris]
if running_task_mon_uris:
- info['raid_task_monitor_uris'] = running_task_mon_uris
- node.driver_internal_info = info
+ node.set_driver_internal_info('raid_task_monitor_uris',
+ running_task_mon_uris)
# will check remaining jobs in the next period
else:
# all tasks completed and none of them failed
- info.pop('raid_task_monitor_uris', None)
+ node.del_driver_internal_info('raid_task_monitor_uris')
self._set_success(task)
- node.driver_internal_info = info
node.save()
def _set_failed(self, task, log_msg, error_msg):
@@ -1671,9 +1662,8 @@ class DracWSManRAID(base.RAIDInterface):
physical_disk_name)
# adding logical_disks to driver_internal_info to create virtual disks
- driver_internal_info = node.driver_internal_info
- driver_internal_info[
- "logical_disks_to_create"] = logical_disks_to_create
+ node.set_driver_internal_info('logical_disks_to_create',
+ logical_disks_to_create)
commit_results = None
if logical_disks_to_create:
@@ -1688,8 +1678,8 @@ class DracWSManRAID(base.RAIDInterface):
substep="create_virtual_disks")
volume_validation = True if commit_results else False
- driver_internal_info['volume_validation'] = volume_validation
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('volume_validation',
+ volume_validation)
node.save()
if commit_results:
@@ -1843,33 +1833,27 @@ class DracWSManRAID(base.RAIDInterface):
self._complete_raid_substep(task, node)
def _clear_raid_substep(self, node):
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('raid_config_substep', None)
- driver_internal_info.pop('raid_config_parameters', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('raid_config_substep')
+ node.del_driver_internal_info('raid_config_parameters')
node.save()
def _set_raid_config_job_failure(self, node):
- driver_internal_info = node.driver_internal_info
- driver_internal_info['raid_config_job_failure'] = True
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('raid_config_job_failure', True)
node.save()
def _clear_raid_config_job_failure(self, node):
- driver_internal_info = node.driver_internal_info
- del driver_internal_info['raid_config_job_failure']
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('raid_config_job_failure')
node.save()
def _delete_cached_config_job_id(self, node, finished_config_job_ids=None):
if finished_config_job_ids is None:
finished_config_job_ids = []
- driver_internal_info = node.driver_internal_info
- unfinished_job_ids = [job_id for job_id
- in driver_internal_info['raid_config_job_ids']
- if job_id not in finished_config_job_ids]
- driver_internal_info['raid_config_job_ids'] = unfinished_job_ids
- node.driver_internal_info = driver_internal_info
+ unfinished_job_ids = [
+ job_id for job_id
+ in node.driver_internal_info['raid_config_job_ids']
+ if job_id not in finished_config_job_ids]
+ node.set_driver_internal_info('raid_config_job_ids',
+ unfinished_job_ids)
node.save()
def _set_failed(self, task, config_job):
diff --git a/ironic/drivers/modules/ilo/bios.py b/ironic/drivers/modules/ilo/bios.py
index a60255423..cadbe3c85 100644
--- a/ironic/drivers/modules/ilo/bios.py
+++ b/ironic/drivers/modules/ilo/bios.py
@@ -100,13 +100,11 @@ class IloBIOS(base.BIOSInterface):
deploy_utils.set_async_step_flags(node, reboot=True,
skip_current_step=False)
- driver_internal_info = node.driver_internal_info
if step == 'apply_configuration':
- driver_internal_info['apply_bios'] = True
+ node.set_driver_internal_info('apply_bios', True)
else:
- driver_internal_info['reset_bios'] = True
+ node.set_driver_internal_info('reset_bios', True)
- node.driver_internal_info = driver_internal_info
node.save()
return return_state
@@ -122,11 +120,9 @@ class IloBIOS(base.BIOSInterface):
"""
node = task.node
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('apply_bios', None)
- driver_internal_info.pop('reset_bios', None)
- task.node.driver_internal_info = driver_internal_info
- task.node.save()
+ node.del_driver_internal_info('apply_bios')
+ node.del_driver_internal_info('reset_bios')
+ node.save()
if step not in ('apply_configuration', 'factory_reset'):
errmsg = (_('Could not find the step %(step)s for the '
@@ -174,11 +170,10 @@ class IloBIOS(base.BIOSInterface):
"""
node = task.node
- driver_internal_info = node.driver_internal_info
data = {}
for setting in settings:
data.update({setting['name']: setting['value']})
- if not driver_internal_info.get('apply_bios'):
+ if not node.driver_internal_info.get('apply_bios'):
return self._execute_pre_boot_bios_step(
task, 'apply_configuration', data)
else:
@@ -198,9 +193,8 @@ class IloBIOS(base.BIOSInterface):
"""
node = task.node
- driver_internal_info = node.driver_internal_info
- if not driver_internal_info.get('reset_bios'):
+ if not node.driver_internal_info.get('reset_bios'):
return self._execute_pre_boot_bios_step(task, 'factory_reset')
else:
return self._execute_post_boot_bios_step(task, 'factory_reset')
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index 60e0c3a25..166499d29 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -288,9 +288,7 @@ def prepare_node_for_deploy(task):
# not provided.
# Since secure boot was disabled, we are in 'uefi' boot mode.
if boot_mode_utils.get_boot_mode_for_deploy(task.node) is None:
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['deploy_boot_mode'] = 'uefi'
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info('deploy_boot_mode', 'uefi')
task.node.save()
@@ -458,9 +456,7 @@ class IloVirtualMediaBoot(base.BootInterface):
# It will set iSCSI info onto iLO
if boot_mode == 'uefi':
# Need to set 'ilo_uefi_iscsi_boot' param for clean up
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['ilo_uefi_iscsi_boot'] = True
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info('ilo_uefi_iscsi_boot', True)
task.node.save()
task.driver.management.set_iscsi_boot_target(task)
manager_utils.node_set_boot_device(
@@ -516,9 +512,7 @@ class IloVirtualMediaBoot(base.BootInterface):
and task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
# It will clear iSCSI info from iLO
task.driver.management.clear_iscsi_boot_target(task)
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info.pop('ilo_uefi_iscsi_boot', None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info('ilo_uefi_iscsi_boot')
task.node.save()
else:
image_utils.cleanup_iso_image(task)
@@ -626,9 +620,7 @@ class IloPXEBoot(pxe.PXEBoot):
# Need to enable secure boot, if being requested
boot_mode_utils.configure_secure_boot_if_needed(task)
# Need to set 'ilo_uefi_iscsi_boot' param for clean up
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['ilo_uefi_iscsi_boot'] = True
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info('ilo_uefi_iscsi_boot', True)
task.node.save()
# It will set iSCSI info onto iLO
task.driver.management.set_iscsi_boot_target(task)
@@ -654,7 +646,6 @@ class IloPXEBoot(pxe.PXEBoot):
:raises: IloOperationError, if some operation on iLO failed.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
- driver_internal_info = task.node.driver_internal_info
if (deploy_utils.is_iscsi_boot(task)
and task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
@@ -662,8 +653,7 @@ class IloPXEBoot(pxe.PXEBoot):
# It will clear iSCSI info from iLO in case of booting from
# volume in UEFI boot mode
task.driver.management.clear_iscsi_boot_target(task)
- driver_internal_info.pop('ilo_uefi_iscsi_boot', None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info('ilo_uefi_iscsi_boot')
task.node.save()
else:
# Volume boot in BIOS boot mode is handled using
@@ -723,9 +713,7 @@ class IloiPXEBoot(ipxe.iPXEBoot):
# Need to enable secure boot, if being requested
boot_mode_utils.configure_secure_boot_if_needed(task)
# Need to set 'ilo_uefi_iscsi_boot' param for clean up
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['ilo_uefi_iscsi_boot'] = True
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info('ilo_uefi_iscsi_boot', True)
task.node.save()
# It will set iSCSI info onto iLO
task.driver.management.set_iscsi_boot_target(task)
@@ -751,7 +739,6 @@ class IloiPXEBoot(ipxe.iPXEBoot):
:raises: IloOperationError, if some operation on iLO failed.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
- driver_internal_info = task.node.driver_internal_info
if (deploy_utils.is_iscsi_boot(task)
and task.node.driver_internal_info.get('ilo_uefi_iscsi_boot')):
@@ -759,8 +746,7 @@ class IloiPXEBoot(ipxe.iPXEBoot):
# It will clear iSCSI info from iLO in case of booting from
# volume in UEFI boot mode
task.driver.management.clear_iscsi_boot_target(task)
- driver_internal_info.pop('ilo_uefi_iscsi_boot', None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info('ilo_uefi_iscsi_boot')
task.node.save()
else:
# Volume boot in BIOS boot mode is handled using
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index a69a4e3ec..2b5b8c0db 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -637,9 +637,7 @@ def update_boot_mode(task):
# No boot mode found. Check if default_boot_mode is defined
if not boot_mode and (CONF.ilo.default_boot_mode in ['bios', 'uefi']):
boot_mode = CONF.ilo.default_boot_mode
- driver_internal_info = node.driver_internal_info
- driver_internal_info['deploy_boot_mode'] = boot_mode
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('deploy_boot_mode', boot_mode)
node.save()
# Boot mode is computed, setting it for the deploy
@@ -679,9 +677,7 @@ def update_boot_mode(task):
"as pending boot mode is unknown.",
{'uuid': node.uuid, 'boot_mode': boot_mode})
- driver_internal_info = node.driver_internal_info
- driver_internal_info['deploy_boot_mode'] = boot_mode
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('deploy_boot_mode', boot_mode)
node.save()
diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py
index 3e2eb5622..c9a8259e6 100644
--- a/ironic/drivers/modules/ilo/management.py
+++ b/ironic/drivers/modules/ilo/management.py
@@ -951,17 +951,13 @@ class IloManagement(base.ManagementInterface):
class Ilo5Management(IloManagement):
def _set_driver_internal_value(self, task, value, *keys):
- driver_internal_info = task.node.driver_internal_info
for key in keys:
- driver_internal_info[key] = value
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info(key, value)
task.node.save()
def _pop_driver_internal_values(self, task, *keys):
- driver_internal_info = task.node.driver_internal_info
for key in keys:
- driver_internal_info.pop(key, None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info(key)
task.node.save()
def _wait_for_disk_erase_status(self, node):
@@ -1041,7 +1037,6 @@ class Ilo5Management(IloManagement):
{'hdd': 'overwrite', 'ssd': 'block'})
node = task.node
self._validate_erase_pattern(erase_pattern, node)
- driver_internal_info = node.driver_internal_info
LOG.debug("Calling out-of-band sanitize disk erase for node %(node)s",
{'node': node.uuid})
try:
@@ -1056,7 +1051,7 @@ class Ilo5Management(IloManagement):
# First disk-erase will execute for HDD's and after reboot only
# try for SSD, since both share same redfish api and would be
# overwritten.
- if not driver_internal_info.get(
+ if not node.driver_internal_info.get(
'ilo_disk_erase_hdd_check') and ('HDD' in disk_types):
ilo_object.do_disk_erase('HDD', erase_pattern.get('hdd'))
self._set_driver_internal_value(
@@ -1066,7 +1061,7 @@ class Ilo5Management(IloManagement):
task, False, 'skip_current_clean_step')
return deploy_utils.reboot_to_finish_step(task)
- if not driver_internal_info.get(
+ if not node.driver_internal_info.get(
'ilo_disk_erase_ssd_check') and ('SSD' in disk_types):
ilo_object.do_disk_erase('SSD', erase_pattern.get('ssd'))
self._set_driver_internal_value(
@@ -1145,14 +1140,12 @@ class Ilo5Management(IloManagement):
:raises: InstanceDeployFailure, on failure to execute of deploy step.
"""
node = task.node
- driver_internal_info = node.driver_internal_info
- if driver_internal_info.get('clear_ca_certs_flag'):
+ if node.driver_internal_info.get('clear_ca_certs_flag'):
# NOTE(vmud213): Clear the flag and do nothing as this flow
# is part of the reboot required by the clean step that is
# already executed.
- driver_internal_info.pop('clear_ca_certs_flag', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('clear_ca_certs_flag')
node.save()
return
@@ -1167,8 +1160,7 @@ class Ilo5Management(IloManagement):
raise exception.NodeCleaningFailure(msg)
raise exception.InstanceDeployFailure(msg)
- driver_internal_info['clear_ca_certs_flag'] = True
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('clear_ca_certs_flag', True)
node.save()
deploy_opts = deploy_utils.build_agent_options(task.node)
diff --git a/ironic/drivers/modules/ilo/raid.py b/ironic/drivers/modules/ilo/raid.py
index ae701cc55..f3a32b3cf 100644
--- a/ironic/drivers/modules/ilo/raid.py
+++ b/ironic/drivers/modules/ilo/raid.py
@@ -78,24 +78,18 @@ class Ilo5RAID(base.RAIDInterface):
manager_utils.cleaning_error_handler(task, log_msg, errmsg=msg)
def _set_driver_internal_true_value(self, task, *keys):
- driver_internal_info = task.node.driver_internal_info
for key in keys:
- driver_internal_info[key] = True
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info(key, True)
task.node.save()
def _set_driver_internal_false_value(self, task, *keys):
- driver_internal_info = task.node.driver_internal_info
for key in keys:
- driver_internal_info[key] = False
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info(key, False)
task.node.save()
def _pop_driver_internal_values(self, task, *keys):
- driver_internal_info = task.node.driver_internal_info
for key in keys:
- driver_internal_info.pop(key, None)
- task.node.driver_internal_info = driver_internal_info
+ task.node.del_driver_internal_info(key)
task.node.save()
def _prepare_for_read_raid(self, task, raid_step):
@@ -157,9 +151,8 @@ class Ilo5RAID(base.RAIDInterface):
target_raid_config = raid.filter_target_raid_config(
node, create_root_volume=create_root_volume,
create_nonroot_volumes=create_nonroot_volumes)
- driver_internal_info = node.driver_internal_info
- driver_internal_info['target_raid_config'] = target_raid_config
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('target_raid_config',
+ target_raid_config)
node.save()
LOG.debug("Calling OOB RAID create_configuration for node %(node)s "
"with the following target RAID configuration: %(target)s",
@@ -168,7 +161,8 @@ class Ilo5RAID(base.RAIDInterface):
try:
# Raid configuration in progress, checking status
- if not driver_internal_info.get('ilo_raid_create_in_progress'):
+ if not node.driver_internal_info.get(
+ 'ilo_raid_create_in_progress'):
ilo_object.create_raid_configuration(target_raid_config)
self._prepare_for_read_raid(task, 'create_raid')
return deploy_utils.get_async_step_return_state(node)
@@ -221,12 +215,12 @@ class Ilo5RAID(base.RAIDInterface):
node = task.node
LOG.debug("OOB RAID delete_configuration invoked for node %s.",
node.uuid)
- driver_internal_info = node.driver_internal_info
ilo_object = ilo_common.get_ilo_object(node)
try:
# Raid configuration in progress, checking status
- if not driver_internal_info.get('ilo_raid_delete_in_progress'):
+ if not node.driver_internal_info.get(
+ 'ilo_raid_delete_in_progress'):
ilo_object.delete_raid_configuration()
self._prepare_for_read_raid(task, 'delete_raid')
return deploy_utils.get_async_step_return_state(node)
diff --git a/ironic/drivers/modules/inspect_utils.py b/ironic/drivers/modules/inspect_utils.py
index 911c5b402..89a13e658 100644
--- a/ironic/drivers/modules/inspect_utils.py
+++ b/ironic/drivers/modules/inspect_utils.py
@@ -14,6 +14,7 @@
# under the License.
from oslo_log import log as logging
+from oslo_utils import netutils
from ironic.common import exception
from ironic import objects
@@ -34,6 +35,12 @@ def create_ports_if_not_exist(task, macs):
"""
node = task.node
for mac in macs:
+ if not netutils.is_valid_mac(mac):
+ LOG.warning("Ignoring NIC address %(address)s for node %(node)s "
+ "because it is not a valid MAC",
+ {'address': mac, 'node': node.uuid})
+ continue
+
port_dict = {'address': mac, 'node_id': node.id}
port = objects.Port(task.context, **port_dict)
diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py
index b1c20c968..d5a699673 100644
--- a/ironic/drivers/modules/ipmitool.py
+++ b/ironic/drivers/modules/ipmitool.py
@@ -962,20 +962,18 @@ def _constructor_checks(driver):
def _allocate_port(task, host=None):
node = task.node
- dii = node.driver_internal_info or {}
allocated_port = console_utils.acquire_port(host=host)
- dii['allocated_ipmi_terminal_port'] = allocated_port
- node.driver_internal_info = dii
+ node.set_driver_internal_info('allocated_ipmi_terminal_port',
+ allocated_port)
node.save()
return allocated_port
def _release_allocated_port(task):
node = task.node
- dii = node.driver_internal_info or {}
- allocated_port = dii.pop('allocated_ipmi_terminal_port', None)
+ allocated_port = node.del_driver_internal_info(
+ 'allocated_ipmi_terminal_port')
if allocated_port:
- node.driver_internal_info = dii
node.save()
console_utils.release_port(allocated_port)
@@ -1255,16 +1253,18 @@ class IPMIManagement(base.ManagementInterface):
"""
driver_info = task.node.driver_info
- driver_internal_info = task.node.driver_internal_info
+ node = task.node
ifbd = driver_info.get('ipmi_force_boot_device', False)
driver_info = _parse_driver_info(task.node)
if (strutils.bool_from_string(ifbd)
- and driver_internal_info.get('persistent_boot_device')
- and driver_internal_info.get('is_next_boot_persistent', True)):
+ and node.driver_internal_info.get('persistent_boot_device')
+ and node.driver_internal_info.get('is_next_boot_persistent',
+ True)):
return {
- 'boot_device': driver_internal_info['persistent_boot_device'],
+ 'boot_device': node.driver_internal_info[
+ 'persistent_boot_device'],
'persistent': True
}
diff --git a/ironic/drivers/modules/irmc/bios.py b/ironic/drivers/modules/irmc/bios.py
index 55201b5d1..b2384a2b1 100644
--- a/ironic/drivers/modules/irmc/bios.py
+++ b/ironic/drivers/modules/irmc/bios.py
@@ -145,7 +145,5 @@ class IRMCBIOS(base.BIOSInterface):
delete_names)
def _resume_cleaning(self, task):
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info['cleaning_reboot'] = True
- task.node.driver_internal_info = driver_internal_info
+ task.node.set_driver_internal_info('cleaning_reboot', True)
task.node.save()
diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py
index 99e5bbc79..7438137f7 100644
--- a/ironic/drivers/modules/irmc/boot.py
+++ b/ironic/drivers/modules/irmc/boot.py
@@ -288,20 +288,20 @@ def _prepare_boot_iso(task, root_uuid):
for BIOS boot_mode failed.
"""
deploy_info = _parse_deploy_info(task.node)
- driver_internal_info = task.node.driver_internal_info
# fetch boot iso
if deploy_info.get('boot_iso'):
boot_iso_href = deploy_info['boot_iso']
if _is_image_href_ordinary_file_name(boot_iso_href):
- driver_internal_info['boot_iso'] = boot_iso_href
+ task.node.set_driver_internal_info('boot_iso', boot_iso_href)
else:
boot_iso_filename = _get_iso_name(task.node, label='boot')
boot_iso_fullpathname = os.path.join(
CONF.irmc.remote_image_share_root, boot_iso_filename)
images.fetch(task.context, boot_iso_href, boot_iso_fullpathname)
- driver_internal_info['boot_iso'] = boot_iso_filename
+ task.node.set_driver_internal_info('boot_iso',
+ boot_iso_filename)
# create boot iso
else:
@@ -329,10 +329,10 @@ def _prepare_boot_iso(task, root_uuid):
kernel_params=kernel_params,
boot_mode=boot_mode)
- driver_internal_info['boot_iso'] = boot_iso_filename
+ task.node.set_driver_internal_info('boot_iso',
+ boot_iso_filename)
# save driver_internal_info['boot_iso']
- task.node.driver_internal_info = driver_internal_info
task.node.save()
@@ -1047,8 +1047,8 @@ class IRMCVirtualMediaBoot(base.BootInterface, IRMCVolumeBootMixIn):
manager_utils.node_set_boot_device(task, boot_devices.DISK,
persistent=True)
else:
- driver_internal_info = node.driver_internal_info
- root_uuid_or_disk_id = driver_internal_info['root_uuid_or_disk_id']
+ root_uuid_or_disk_id = node.driver_internal_info[
+ 'root_uuid_or_disk_id']
self._configure_vmedia_boot(task, root_uuid_or_disk_id)
# Enable secure boot, if being requested
@@ -1073,11 +1073,9 @@ class IRMCVirtualMediaBoot(base.BootInterface, IRMCVolumeBootMixIn):
boot_mode_utils.deconfigure_secure_boot_if_needed(task)
_remove_share_file(_get_iso_name(task.node, label='boot'))
- driver_internal_info = task.node.driver_internal_info
- driver_internal_info.pop('boot_iso', None)
- driver_internal_info.pop('irmc_boot_iso', None)
+ task.node.del_driver_internal_info('boot_iso')
+ task.node.del_driver_internal_info('irmc_boot_iso')
- task.node.driver_internal_info = driver_internal_info
task.node.save()
_cleanup_vmedia_boot(task)
diff --git a/ironic/drivers/modules/irmc/management.py b/ironic/drivers/modules/irmc/management.py
index 99a719a15..079ae9e44 100644
--- a/ironic/drivers/modules/irmc/management.py
+++ b/ironic/drivers/modules/irmc/management.py
@@ -139,9 +139,8 @@ def backup_bios_config(task):
error=e)
# Save bios config into the driver_internal_info
- internal_info = task.node.driver_internal_info
- internal_info['irmc_bios_config'] = result['bios_config']
- task.node.driver_internal_info = internal_info
+ task.node.set_driver_internal_info('irmc_bios_config',
+ result['bios_config'])
task.node.save()
LOG.info('BIOS config is backed up successfully for node %s',
@@ -170,14 +169,12 @@ def _restore_bios_config(task):
def _remove_bios_config(task, reboot_flag=False):
"""Remove backup bios config from the node."""
- internal_info = task.node.driver_internal_info
- internal_info.pop('irmc_bios_config', None)
+ task.node.del_driver_internal_info('irmc_bios_config')
# NOTE(tiendc): If reboot flag is raised, then the BM will
# reboot and cause a bug if the next clean step is in-band.
# See https://storyboard.openstack.org/#!/story/2002731
if reboot_flag:
- internal_info['cleaning_reboot'] = True
- task.node.driver_internal_info = internal_info
+ task.node.set_driver_internal_info('cleaning_reboot', True)
task.node.save()
irmc_info = irmc_common.parse_driver_info(task.node)
diff --git a/ironic/drivers/modules/redfish/bios.py b/ironic/drivers/modules/redfish/bios.py
index 3ac7fcd6e..c2eb8fcbc 100644
--- a/ironic/drivers/modules/redfish/bios.py
+++ b/ironic/drivers/modules/redfish/bios.py
@@ -338,9 +338,8 @@ class RedfishBIOS(base.BIOSInterface):
:param task: a TaskManager instance containing the node to act on.
"""
- info = task.node.driver_internal_info
- info['post_factory_reset_reboot_requested'] = True
- task.node.driver_internal_info = info
+ task.node.set_driver_internal_info(
+ 'post_factory_reset_reboot_requested', True)
task.node.save()
deploy_utils.set_async_step_flags(task.node, reboot=True,
skip_current_step=False)
@@ -351,10 +350,9 @@ class RedfishBIOS(base.BIOSInterface):
:param task: a TaskManager instance containing the node to act on.
:param attributes: the requested BIOS attributes to update.
"""
- info = task.node.driver_internal_info
- info['post_config_reboot_requested'] = True
- info['requested_bios_attrs'] = attributes
- task.node.driver_internal_info = info
+ task.node.set_driver_internal_info('post_config_reboot_requested',
+ True)
+ task.node.set_driver_internal_info('requested_bios_attrs', attributes)
task.node.save()
deploy_utils.set_async_step_flags(task.node, reboot=True,
skip_current_step=False)
@@ -364,12 +362,11 @@ class RedfishBIOS(base.BIOSInterface):
:param task: a TaskManager instance containing the node to act on.
"""
- info = task.node.driver_internal_info
- info.pop('post_config_reboot_requested', None)
- info.pop('post_factory_reset_reboot_requested', None)
- info.pop('requested_bios_attrs', None)
- task.node.driver_internal_info = info
- task.node.save()
+ node = task.node
+ node.del_driver_internal_info('post_config_reboot_requested')
+ node.del_driver_internal_info('post_factory_reset_reboot_requested')
+ node.del_driver_internal_info('requested_bios_attrs')
+ node.save()
def _set_step_failed(self, task, attrs_not_updated):
"""Fail the cleaning or deployment step and log the error.
diff --git a/ironic/drivers/modules/redfish/firmware_utils.py b/ironic/drivers/modules/redfish/firmware_utils.py
new file mode 100644
index 000000000..35e4bb1f2
--- /dev/null
+++ b/ironic/drivers/modules/redfish/firmware_utils.py
@@ -0,0 +1,58 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import jsonschema
+from oslo_log import log
+
+from ironic.common import exception
+from ironic.common.i18n import _
+
+LOG = log.getLogger(__name__)
+
+_UPDATE_FIRMWARE_SCHEMA = {
+ "$schema": "http://json-schema.org/schema#",
+ "title": "update_firmware clean step schema",
+ "type": "array",
+ # list of firmware update images
+ "items": {
+ "type": "object",
+ "required": ["url"],
+ "properties": {
+ "url": {
+ "description": "URL for firmware file",
+ "type": "string",
+ "minLength": 1
+ },
+ "wait": {
+ "description": "optional wait time for firmware update",
+ "type": "integer",
+ "minimum": 1
+ }
+ },
+ "additionalProperties": False
+ }
+}
+
+
+def validate_update_firmware_args(firmware_images):
+ """Validate ``update_firmware`` step input argument
+
+ :param firmware_images: args to validate.
+ :raises: InvalidParameterValue When argument is not valid
+ """
+ try:
+ jsonschema.validate(firmware_images, _UPDATE_FIRMWARE_SCHEMA)
+ except jsonschema.ValidationError as err:
+ raise exception.InvalidParameterValue(
+ _('Invalid firmware update %(firmware_images)s. Errors: %(err)s')
+ % {'firmware_images': firmware_images, 'err': err})
diff --git a/ironic/drivers/modules/redfish/management.py b/ironic/drivers/modules/redfish/management.py
index b3517656c..cb56a821b 100644
--- a/ironic/drivers/modules/redfish/management.py
+++ b/ironic/drivers/modules/redfish/management.py
@@ -35,6 +35,7 @@ from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
+from ironic.drivers.modules.redfish import firmware_utils
from ironic.drivers.modules.redfish import utils as redfish_utils
LOG = log.getLogger(__name__)
@@ -759,6 +760,7 @@ class RedfishManagement(base.ManagementInterface):
:returns: None if it is completed.
:raises: RedfishError on an error from the Sushy library.
"""
+ firmware_utils.validate_update_firmware_args(firmware_images)
node = task.node
LOG.debug('Updating firmware on node %(node_uuid)s with firmware '
@@ -806,10 +808,9 @@ class RedfishManagement(base.ManagementInterface):
task_monitor = update_service.simple_update(firmware_url)
- driver_internal_info = node.driver_internal_info
firmware_update['task_monitor'] = task_monitor.task_monitor_uri
- driver_internal_info['firmware_updates'] = firmware_updates
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('firmware_updates',
+ firmware_updates)
def _continue_firmware_updates(self, task, update_service,
firmware_updates):
@@ -838,9 +839,8 @@ class RedfishManagement(base.ManagementInterface):
'firmware_image': firmware_update['url'],
'node': node.uuid})
- driver_internal_info = node.driver_internal_info
- driver_internal_info['firmware_updates'] = firmware_updates
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('firmware_updates',
+ firmware_updates)
node.save()
return
@@ -866,9 +866,7 @@ class RedfishManagement(base.ManagementInterface):
:param node: the node to clear the firmware updates from
"""
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('firmware_updates', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('firmware_updates')
node.save()
@METRICS.timer('RedfishManagement._query_firmware_update_failed')
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index a7a510811..aa4294497 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -841,9 +841,7 @@ class RedfishRAID(base.RAIDInterface):
reboot_required = True
raid_configs.append(raid_config)
- driver_internal_info = node.driver_internal_info
- driver_internal_info['raid_configs'] = raid_configs
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('raid_configs', raid_configs)
return_state = None
deploy_utils.set_async_step_flags(
@@ -914,9 +912,7 @@ class RedfishRAID(base.RAIDInterface):
reboot_required = True
raid_configs.append(raid_config)
- driver_internal_info = node.driver_internal_info
- driver_internal_info['raid_configs'] = raid_configs
- node.driver_internal_info = driver_internal_info
+ node.set_driver_internal_info('raid_configs', raid_configs)
return_state = None
deploy_utils.set_async_step_flags(
@@ -1007,17 +1003,15 @@ class RedfishRAID(base.RAIDInterface):
:param node: the node to clear the RAID configs from
"""
- driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('raid_configs', None)
- node.driver_internal_info = driver_internal_info
+ node.del_driver_internal_info('raid_configs')
node.save()
@METRICS.timer('RedfishRAID._query_raid_config_failed')
@periodics.node_periodic(
purpose='checking async RAID config failed',
spacing=CONF.redfish.raid_config_fail_interval,
- filters={'reserved': False, 'provision_state': states.CLEANFAIL,
- 'maintenance': True},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANFAIL, states.DEPLOYFAIL}, 'maintenance': True},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
@@ -1038,7 +1032,8 @@ class RedfishRAID(base.RAIDInterface):
@periodics.node_periodic(
purpose='checking async RAID config tasks',
spacing=CONF.redfish.raid_config_status_interval,
- filters={'reserved': False, 'provision_state': states.CLEANWAIT},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANWAIT, states.DEPLOYWAIT}},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
@@ -1116,4 +1111,7 @@ class RedfishRAID(base.RAIDInterface):
self._clear_raid_configs(node)
LOG.info('RAID configuration completed for node %(node)s',
{'node': node.uuid})
- manager_utils.notify_conductor_resume_clean(task)
+ if task.node.clean_step:
+ manager_utils.notify_conductor_resume_clean(task)
+ else:
+ manager_utils.notify_conductor_resume_deploy(task)
diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py
index 55b18542b..b90660b59 100644
--- a/ironic/drivers/utils.py
+++ b/ironic/drivers/utils.py
@@ -194,13 +194,12 @@ def ensure_next_boot_device(task, driver_info):
"""
ifbd = driver_info.get('force_boot_device', False)
if strutils.bool_from_string(ifbd):
- driver_internal_info = task.node.driver_internal_info
- if driver_internal_info.get('is_next_boot_persistent') is False:
- driver_internal_info.pop('is_next_boot_persistent', None)
- task.node.driver_internal_info = driver_internal_info
+ info = task.node.driver_internal_info
+ if info.get('is_next_boot_persistent') is False:
+ task.node.del_driver_internal_info('is_next_boot_persistent')
task.node.save()
else:
- boot_device = driver_internal_info.get('persistent_boot_device')
+ boot_device = info.get('persistent_boot_device')
if boot_device:
utils.node_set_boot_device(task, boot_device)
@@ -218,14 +217,12 @@ def force_persistent_boot(task, device, persistent):
"""
node = task.node
- driver_internal_info = node.driver_internal_info
if persistent:
- driver_internal_info.pop('is_next_boot_persistent', None)
- driver_internal_info['persistent_boot_device'] = device
+ node.del_driver_internal_info('is_next_boot_persistent')
+ node.set_driver_internal_info('persistent_boot_device', device)
else:
- driver_internal_info['is_next_boot_persistent'] = False
+ node.set_driver_internal_info('is_next_boot_persistent', False)
- node.driver_internal_info = driver_internal_info
node.save()
diff --git a/ironic/tests/unit/api/controllers/v1/test_allocation.py b/ironic/tests/unit/api/controllers/v1/test_allocation.py
index 1be54db80..367c06350 100644
--- a/ironic/tests/unit/api/controllers/v1/test_allocation.py
+++ b/ironic/tests/unit/api/controllers/v1/test_allocation.py
@@ -420,6 +420,8 @@ class TestListAllocations(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_allocation(
self.context,
node_id=node_id,
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index b642f3ee3..ee957178c 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -1499,6 +1499,7 @@ class TestListNodes(test_api_base.BaseApiTest):
headers=headers)
self.assertEqual(1, len(data['portgroups']))
self.assertIn('next', data)
+ self.assertIn('portgroups', data['next'])
def test_portgroups_subresource_link(self):
node = obj_utils.create_test_node(self.context)
@@ -1537,6 +1538,7 @@ class TestListNodes(test_api_base.BaseApiTest):
data = self.get_json('/nodes/%s/ports?limit=1' % node.uuid)
self.assertEqual(1, len(data['ports']))
self.assertIn('next', data)
+ self.assertIn('ports', data['next'])
def test_ports_subresource_noid(self):
node = obj_utils.create_test_node(self.context)
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 1f3322acc..12208e049 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -659,6 +659,9 @@ class TestListPorts(test_api_base.BaseApiTest):
return True
mock_authorize.side_effect = mock_authorize_function
+ another_node = obj_utils.create_test_node(
+ self.context, uuid=uuidutils.generate_uuid())
+
ports = []
# these ports should be retrieved by the API call
for id_ in range(0, 2):
@@ -670,7 +673,8 @@ class TestListPorts(test_api_base.BaseApiTest):
# these ports should NOT be retrieved by the API call
for id_ in range(3, 5):
port = obj_utils.create_test_port(
- self.context, uuid=uuidutils.generate_uuid(),
+ self.context, node_id=another_node.id,
+ uuid=uuidutils.generate_uuid(),
address='52:54:00:cf:2d:3%s' % id_)
data = self.get_json('/ports', headers={'X-Project-Id': '12345'})
self.assertEqual(len(ports), len(data['ports']))
@@ -896,6 +900,8 @@ class TestListPorts(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_port(self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
@@ -920,6 +926,8 @@ class TestListPorts(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_port(self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
@@ -947,6 +955,8 @@ class TestListPorts(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_port(self.context,
node_id=node_id,
uuid=uuidutils.generate_uuid(),
@@ -1056,7 +1066,8 @@ class TestListPorts(test_api_base.BaseApiTest):
return True
mock_authorize.side_effect = mock_authorize_function
- pg = obj_utils.create_test_portgroup(self.context)
+ pg = obj_utils.create_test_portgroup(self.context,
+ node_id=self.node.id)
obj_utils.create_test_port(self.context, node_id=self.node.id,
portgroup_id=pg.id)
data = self.get_json('/ports/detail?portgroup=%s' % pg.uuid,
diff --git a/ironic/tests/unit/api/controllers/v1/test_portgroup.py b/ironic/tests/unit/api/controllers/v1/test_portgroup.py
index 394e502ef..07b95c428 100644
--- a/ironic/tests/unit/api/controllers/v1/test_portgroup.py
+++ b/ironic/tests/unit/api/controllers/v1/test_portgroup.py
@@ -546,6 +546,8 @@ class TestListPortgroups(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_portgroup(
self.context,
node_id=node_id,
diff --git a/ironic/tests/unit/api/controllers/v1/test_volume_connector.py b/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
index 2da6eb4ea..70e11a347 100644
--- a/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
+++ b/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
@@ -353,6 +353,8 @@ class TestListVolumeConnectors(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_volume_connector(
self.context, node_id=node_id,
uuid=uuidutils.generate_uuid(),
diff --git a/ironic/tests/unit/api/controllers/v1/test_volume_target.py b/ironic/tests/unit/api/controllers/v1/test_volume_target.py
index ffe8aa594..038f3cb58 100644
--- a/ironic/tests/unit/api/controllers/v1/test_volume_target.py
+++ b/ironic/tests/unit/api/controllers/v1/test_volume_target.py
@@ -328,6 +328,8 @@ class TestListVolumeTargets(test_api_base.BaseApiTest):
node_id = self.node.id
else:
node_id = 100000 + i
+ obj_utils.create_test_node(self.context, id=node_id,
+ uuid=uuidutils.generate_uuid())
obj_utils.create_test_volume_target(
self.context, node_id=node_id,
uuid=uuidutils.generate_uuid(), boot_index=i)
diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py
index 99921e988..5793e95a8 100644
--- a/ironic/tests/unit/api/test_acl.py
+++ b/ironic/tests/unit/api/test_acl.py
@@ -26,6 +26,7 @@ from oslo_config import cfg
from ironic.api.controllers.v1 import versions as api_versions
from ironic.common import exception
from ironic.conductor import rpcapi
+from ironic.db import api as db_api
from ironic.tests.unit.api import base
from ironic.tests.unit.db import utils as db_utils
@@ -238,7 +239,6 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
def _create_test_data(self):
allocated_node_id = 31
fake_db_allocation = db_utils.create_test_allocation(
- node_id=allocated_node_id,
resource_class="CUSTOM_TEST")
fake_db_node = db_utils.create_test_node(
chassis_id=None,
@@ -247,10 +247,12 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
fake_db_node_alloced = db_utils.create_test_node(
id=allocated_node_id,
chassis_id=None,
- allocation_id=fake_db_allocation['id'],
uuid='22e26c0b-03f2-4d2e-ae87-c02d7f33c000',
driver='fake-driverz',
owner='z')
+ dbapi = db_api.get_instance()
+ dbapi.update_allocation(fake_db_allocation['id'],
+ dict(node_id=allocated_node_id))
fake_vif_port_id = "ee21d58f-5de2-4956-85ff-33935ea1ca00"
fake_db_port = db_utils.create_test_port(
node_id=fake_db_node['id'],
@@ -263,9 +265,9 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
fake_db_deploy_template = db_utils.create_test_deploy_template()
fake_db_conductor = db_utils.create_test_conductor()
fake_db_volume_target = db_utils.create_test_volume_target(
- node_id=fake_db_allocation['id'])
+ node_id=fake_db_node['id'])
fake_db_volume_connector = db_utils.create_test_volume_connector(
- node_id=fake_db_allocation['id'])
+ node_id=fake_db_node['id'])
# Trait name aligns with create_test_node_trait.
fake_trait = 'trait'
fake_setting = 'FAKE_SETTING'
@@ -407,9 +409,7 @@ class TestRBACProjectScoped(TestACLBase):
node_id=owned_node.id)
# Leased nodes
- fake_allocation_id = 61
leased_node = db_utils.create_test_node(
- allocation_id=fake_allocation_id,
uuid=lessee_node_ident,
owner=owner_project_id,
lessee=lessee_project_id,
@@ -425,22 +425,26 @@ class TestRBACProjectScoped(TestACLBase):
node_id=leased_node['id'])
fake_trait = 'CUSTOM_MEOW'
fake_vif_port_id = "0e21d58f-5de2-4956-85ff-33935ea1ca01"
+ fake_allocation_id = 61
fake_leased_allocation = db_utils.create_test_allocation(
id=fake_allocation_id,
- node_id=leased_node['id'],
owner=lessee_project_id,
resource_class="CUSTOM_LEASED")
+ dbapi = db_api.get_instance()
+ dbapi.update_allocation(fake_allocation_id,
+ dict(node_id=leased_node['id']))
+
leased_node_history = db_utils.create_test_history(
node_id=leased_node.id)
# Random objects that shouldn't be project visible
+ other_node = db_utils.create_test_node(
+ uuid='573208e5-cd41-4e26-8f06-ef44022b3793')
other_port = db_utils.create_test_port(
+ node_id=other_node['id'],
uuid='abfd8dbb-1732-449a-b760-2224035c6b99',
address='00:00:00:00:00:ff')
-
- other_node = db_utils.create_test_node(
- uuid='573208e5-cd41-4e26-8f06-ef44022b3793')
other_pgroup = db_utils.create_test_portgroup(
uuid='5810f41c-6585-41fc-b9c9-a94f50d421b5',
node_id=other_node['id'],
diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py
index 77626a084..c4857d21c 100644
--- a/ironic/tests/unit/common/test_driver_factory.py
+++ b/ironic/tests/unit/common/test_driver_factory.py
@@ -319,39 +319,31 @@ class DefaultInterfaceTestCase(db_base.DbTestCase):
iface = driver_factory.default_interface(self.driver, 'deploy')
self.assertEqual('ansible', iface)
- def test_calculated_no_answer(self):
- # manual-management supports no power interfaces
+ def test_calculated_fallback(self):
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
- self.assertRaisesRegex(
- exception.NoValidDefaultForInterface,
- "For hardware type 'ManualManagementHardware', no default "
- "value found for power interface.",
- driver_factory.default_interface, self.driver, 'power')
+ iface = driver_factory.default_interface(self.driver, 'power')
+ self.assertEqual('agent', iface)
def test_calculated_no_answer_drivername(self):
# manual-management instance (of entry-point driver named 'foo')
# supports no power interfaces
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
- self.assertRaisesRegex(
- exception.NoValidDefaultForInterface,
- "For hardware type 'foo', no default value found for power "
- "interface.",
- driver_factory.default_interface, self.driver, 'power',
- driver_name='foo')
+ self.assertEqual(
+ "agent",
+ driver_factory.default_interface(self.driver, 'power',
+ driver_name='foo'))
def test_calculated_no_answer_drivername_node(self):
# for a node with manual-management instance (of entry-point driver
# named 'foo'), no default power interface is supported
self.config(default_power_interface=None)
self.config(enabled_power_interfaces=[])
- self.assertRaisesRegex(
- exception.NoValidDefaultForInterface,
- "For node bar with hardware type 'foo', no default "
- "value found for power interface.",
- driver_factory.default_interface, self.driver, 'power',
- driver_name='foo', node='bar')
+ self.assertEqual(
+ "agent",
+ driver_factory.default_interface(self.driver, 'power',
+ driver_name='foo', node='bar'))
@mock.patch.object(driver_factory, 'get_interface', autospec=True)
def test_check_exception_IncompatibleInterface(self, mock_get_interface):
@@ -490,15 +482,18 @@ class HardwareTypeLoadTestCase(db_base.DbTestCase):
task_manager.acquire, self.context, node.id)
mock_get_hw_type.assert_called_once_with('fake-2')
- def test_build_driver_for_task_no_defaults(self):
+ def test_build_driver_for_task_fallback_defaults(self):
self.config(dhcp_provider=None, group='dhcp')
+ self.config(enabled_hardware_types=['fake-hardware'])
for iface in drivers_base.ALL_INTERFACES:
if iface not in ['network', 'storage']:
self.config(**{'enabled_%s_interfaces' % iface: []})
self.config(**{'default_%s_interface' % iface: None})
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
- self.assertRaises(exception.NoValidDefaultForInterface,
- task_manager.acquire, self.context, node.id)
+ with task_manager.acquire(self.context, node.id) as task:
+ for iface in drivers_base.ALL_INTERFACES:
+ impl = getattr(task.driver, iface)
+ self.assertIsNotNone(impl)
def test_build_driver_for_task_calculated_defaults(self):
self.config(dhcp_provider=None, group='dhcp')
@@ -581,10 +576,8 @@ class HardwareTypeLoadTestCase(db_base.DbTestCase):
# for storage, so we'll test this case with raid.
self.config(enabled_raid_interfaces=[])
node = obj_utils.get_test_node(self.context, driver='fake-hardware')
- self.assertRaisesRegex(
- exception.NoValidDefaultForInterface,
- "raid interface",
- driver_factory.check_and_update_node_interfaces, node)
+ self.assertTrue(driver_factory.check_and_update_node_interfaces(node))
+ self.assertEqual('fake', node.raid_interface)
def _test_enabled_supported_interfaces(self, enable_storage):
ht = fake_hardware.FakeHardware()
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index 14017682b..9892d671c 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -22,7 +22,6 @@ import shutil
from unittest import mock
from ironic_lib import disk_utils
-from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_config import cfg
@@ -299,12 +298,9 @@ class FsImageTestCase(base.TestCase):
@mock.patch.object(images, '_create_root_fs', autospec=True)
@mock.patch.object(utils, 'tempdir', autospec=True)
@mock.patch.object(utils, 'write_to_file', autospec=True)
- @mock.patch.object(ironic_utils, 'dd', autospec=True)
- @mock.patch.object(utils, 'umount', autospec=True)
- @mock.patch.object(utils, 'mount', autospec=True)
- @mock.patch.object(ironic_utils, 'mkfs', autospec=True)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test_create_vfat_image(
- self, mkfs_mock, mount_mock, umount_mock, dd_mock, write_mock,
+ self, execute_mock, write_mock,
tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=io.BytesIO)
@@ -317,78 +313,34 @@ class FsImageTestCase(base.TestCase):
files_info=files_info, parameters_file='qwe',
fs_size_kib=1000)
- dd_mock.assert_called_once_with('/dev/zero',
- 'tgt_file',
- 'count=1',
- 'bs=1000KiB')
-
- mkfs_mock.assert_called_once_with('vfat', 'tgt_file',
- label="ir-vfd-dev")
- mount_mock.assert_called_once_with('tgt_file', 'tempdir',
- '-o', 'umask=0')
+ execute_mock.assert_has_calls([
+ mock.call('dd', 'if=/dev/zero', 'of=tgt_file', 'count=1',
+ 'bs=1000KiB'),
+ mock.call('mkfs', '-t', 'vfat', '-n', 'ir-vfd-de', 'tgt_file'),
+ mock.call('mcopy', '-s', 'tempdir/*', '-i', 'tgt_file', '::')
+ ])
parameters_file_path = os.path.join('tempdir', 'qwe')
write_mock.assert_called_once_with(parameters_file_path, 'p1=v1')
create_root_fs_mock.assert_called_once_with('tempdir', files_info)
- umount_mock.assert_called_once_with('tempdir')
-
- @mock.patch.object(images, '_create_root_fs', autospec=True)
- @mock.patch.object(utils, 'tempdir', autospec=True)
- @mock.patch.object(ironic_utils, 'dd', autospec=True)
- @mock.patch.object(utils, 'umount', autospec=True)
- @mock.patch.object(utils, 'mount', autospec=True)
- @mock.patch.object(ironic_utils, 'mkfs', autospec=True)
- def test_create_vfat_image_always_umount(
- self, mkfs_mock, mount_mock, umount_mock, dd_mock,
- tempdir_mock, create_root_fs_mock):
-
- mock_file_handle = mock.MagicMock(spec=io.BytesIO)
- mock_file_handle.__enter__.return_value = 'tempdir'
- tempdir_mock.return_value = mock_file_handle
- files_info = {'a': 'b'}
- create_root_fs_mock.side_effect = OSError()
- self.assertRaises(exception.ImageCreationFailed,
- images.create_vfat_image, 'tgt_file',
- files_info=files_info)
-
- umount_mock.assert_called_once_with('tempdir')
- @mock.patch.object(ironic_utils, 'dd', autospec=True)
- def test_create_vfat_image_dd_fails(self, dd_mock):
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test_create_vfat_image_dd_fails(self, execute_mock):
- dd_mock.side_effect = processutils.ProcessExecutionError
+ execute_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
@mock.patch.object(utils, 'tempdir', autospec=True)
- @mock.patch.object(ironic_utils, 'dd', autospec=True)
- @mock.patch.object(ironic_utils, 'mkfs', autospec=True)
- def test_create_vfat_image_mkfs_fails(self, mkfs_mock, dd_mock,
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test_create_vfat_image_mkfs_fails(self, execute_mock,
tempdir_mock):
mock_file_handle = mock.MagicMock(spec=io.BytesIO)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
- mkfs_mock.side_effect = processutils.ProcessExecutionError
- self.assertRaises(exception.ImageCreationFailed,
- images.create_vfat_image, 'tgt_file')
-
- @mock.patch.object(images, '_create_root_fs', autospec=True)
- @mock.patch.object(utils, 'tempdir', autospec=True)
- @mock.patch.object(ironic_utils, 'dd', autospec=True)
- @mock.patch.object(utils, 'umount', autospec=True)
- @mock.patch.object(utils, 'mount', autospec=True)
- @mock.patch.object(ironic_utils, 'mkfs', autospec=True)
- def test_create_vfat_image_umount_fails(
- self, mkfs_mock, mount_mock, umount_mock, dd_mock,
- tempdir_mock, create_root_fs_mock):
-
- mock_file_handle = mock.MagicMock(spec=io.BytesIO)
- mock_file_handle.__enter__.return_value = 'tempdir'
- tempdir_mock.return_value = mock_file_handle
- umount_mock.side_effect = processutils.ProcessExecutionError
-
+ execute_mock.side_effect = [None, processutils.ProcessExecutionError]
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py
index 900049df6..7dc67ab32 100644
--- a/ironic/tests/unit/common/test_neutron.py
+++ b/ironic/tests/unit/common/test_neutron.py
@@ -290,11 +290,13 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
fixed_ips=[])
self.client_mock.create_port.side_effect = [self.neutron_port,
neutron_port2]
+ update_mock.side_effect = [self.neutron_port, neutron_port2]
expected = {port.uuid: self.neutron_port.id,
port2.uuid: neutron_port2.id}
else:
self.client_mock.create_port.return_value = self.neutron_port
+ update_mock.return_value = self.neutron_port
expected = {port.uuid: self.neutron_port['id']}
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -458,6 +460,7 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
vpi_mock.return_value = True
# Ensure we can create ports
self.client_mock.create_port.return_value = self.neutron_port
+ update_mock.return_value = self.neutron_port
expected = {port.uuid: self.neutron_port.id}
with task_manager.acquire(self.context, self.node.uuid) as task:
ports = neutron.add_ports_to_network(task, self.network_uuid)
@@ -492,6 +495,7 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
)
self.client_mock.create_port.side_effect = [
self.neutron_port, openstack_exc.OpenStackCloudException]
+ update_mock.return_value = self.neutron_port
with task_manager.acquire(self.context, self.node.uuid) as task:
neutron.add_ports_to_network(task, self.network_uuid)
self.assertIn("Could not create neutron port for node's",
@@ -999,6 +1003,7 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
# Ensure we can create ports
self.client_mock.create_port.return_value = self.neutron_port
+ update_mock.return_value = self.neutron_port
expected = {port.uuid: self.neutron_port.id}
with task_manager.acquire(self.context, self.node.uuid) as task:
ports = neutron.add_ports_to_network(task, self.network_uuid)
diff --git a/ironic/tests/unit/conductor/test_base_manager.py b/ironic/tests/unit/conductor/test_base_manager.py
index 7ae2c74ee..f92c6e58c 100644
--- a/ironic/tests/unit/conductor/test_base_manager.py
+++ b/ironic/tests/unit/conductor/test_base_manager.py
@@ -193,11 +193,11 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(mock_df.called)
self.assertFalse(mock_reg.called)
- def test_start_fails_on_no_enabled_interfaces(self):
- self.config(enabled_boot_interfaces=[])
- self.assertRaisesRegex(exception.ConfigInvalid,
- 'options enabled_boot_interfaces',
- self.service.init_host)
+ def test_start_with_no_enabled_interfaces(self):
+ self.config(enabled_boot_interfaces=[],
+ enabled_deploy_interfaces=[],
+ enabled_hardware_types=['fake-hardware'])
+ self._start_service()
@mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(driver_factory, 'HardwareTypesFactory', autospec=True)
@@ -311,17 +311,6 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(2, mock_dbapi.call_count)
-class CheckInterfacesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- def test__check_enabled_interfaces_success(self):
- base_manager._check_enabled_interfaces()
-
- def test__check_enabled_interfaces_failure(self):
- self.config(enabled_boot_interfaces=[])
- self.assertRaisesRegex(exception.ConfigInvalid,
- 'options enabled_boot_interfaces',
- base_manager._check_enabled_interfaces)
-
-
class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__conductor_service_record_keepalive(self):
self._start_service()
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 81984e88b..dee5b6974 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -6502,6 +6502,8 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(mock_inspect.called)
+@mock.patch.object(conductor_utils, 'node_history_record',
+ mock.Mock(spec=conductor_utils.node_history_record))
@mock.patch.object(task_manager, 'acquire', autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
autospec=True)
@@ -8207,30 +8209,33 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
def setUp(self):
super(NodeHistoryRecordCleanupTestCase, self).setUp()
- self.node1 = obj_utils.get_test_node(self.context,
- driver='fake-hardware',
- id=10,
- uuid=uuidutils.generate_uuid(),
- conductor_affinity=1)
- self.node2 = obj_utils.get_test_node(self.context,
- driver='fake-hardware',
- id=11,
- uuid=uuidutils.generate_uuid(),
- conductor_affinity=1)
- self.node3 = obj_utils.get_test_node(self.context,
- driver='fake-hardware',
- id=12,
- uuid=uuidutils.generate_uuid(),
- conductor_affinity=1)
+ CONF.set_override('node_history_max_entries', 2, group='conductor')
+ CONF.set_override('node_history_cleanup_batch_count', 2,
+ group='conductor')
+ self._start_service()
+ self.node1 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=10,
+ uuid=uuidutils.generate_uuid(),
+ conductor_affinity=self.service.conductor.id)
+ self.node2 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=11,
+ uuid=uuidutils.generate_uuid(),
+ conductor_affinity=self.service.conductor.id)
+ self.node3 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=12,
+ uuid=uuidutils.generate_uuid(),
+ conductor_affinity=self.service.conductor.id)
self.nodes = [self.node1, self.node2, self.node3]
# Create the nodes, as the tasks need to operate across tables.
self.node1.create()
self.node2.create()
self.node3.create()
- CONF.set_override('node_history_max_entries', 2, group='conductor')
- CONF.set_override('node_history_cleanup_batch_count', 2,
- group='conductor')
- self._start_service()
def test_history_is_pruned_to_config(self):
for node in self.nodes:
@@ -8305,11 +8310,12 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
self.assertEqual(1, len(events))
def test_history_pruning_not_other_conductor(self):
+ another_conductor = obj_utils.create_test_conductor(self.context)
node = obj_utils.get_test_node(self.context,
driver='fake-hardware',
id=33,
uuid=uuidutils.generate_uuid(),
- conductor_affinity=2)
+ conductor_affinity=another_conductor.id)
# create node so it can be queried
node.create()
for i in range(0, 3):
diff --git a/ironic/tests/unit/db/test_node_history.py b/ironic/tests/unit/db/test_node_history.py
index 9e554cd9c..ac0caca36 100644
--- a/ironic/tests/unit/db/test_node_history.py
+++ b/ironic/tests/unit/db/test_node_history.py
@@ -59,6 +59,7 @@ class DBNodeHistoryTestCase(base.DbTestCase):
for i in range(1, 6):
history = db_utils.create_test_history(
id=i, uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
conductor='test-conductor', user='fake-user',
event='Something bad happened but fear not %s' % i,
severity='ERROR', event_type='test')
diff --git a/ironic/tests/unit/db/test_portgroups.py b/ironic/tests/unit/db/test_portgroups.py
index fa0402094..c39bef62a 100644
--- a/ironic/tests/unit/db/test_portgroups.py
+++ b/ironic/tests/unit/db/test_portgroups.py
@@ -42,6 +42,7 @@ class DbportgroupTestCase(base.DbTestCase):
for i in range(1, count):
portgroup = db_utils.create_test_portgroup(
uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
name='portgroup' + str(i),
address='52:54:00:cf:2d:4%s' % i)
uuids.append(str(portgroup.uuid))
diff --git a/ironic/tests/unit/db/test_ports.py b/ironic/tests/unit/db/test_ports.py
index 18b8a9032..97d1e98a7 100644
--- a/ironic/tests/unit/db/test_ports.py
+++ b/ironic/tests/unit/db/test_ports.py
@@ -77,6 +77,7 @@ class DbPortTestCase(base.DbTestCase):
uuids = []
for i in range(1, 6):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
address='52:54:00:cf:2d:4%s' % i)
uuids.append(str(port.uuid))
# Also add the uuid for the port created in setUp()
@@ -89,6 +90,7 @@ class DbPortTestCase(base.DbTestCase):
uuids = []
for i in range(1, 6):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
address='52:54:00:cf:2d:4%s' % i)
uuids.append(str(port.uuid))
# Also add the uuid for the port created in setUp()
@@ -101,9 +103,12 @@ class DbPortTestCase(base.DbTestCase):
self.dbapi.get_port_list, sort_key='foo')
def test_get_port_list_filter_by_node_owner(self):
+ another_node = db_utils.create_test_node(
+ uuid=uuidutils.generate_uuid())
uuids = []
for i in range(1, 3):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=another_node.id,
address='52:54:00:cf:2d:4%s' % i)
for i in range(4, 6):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
@@ -117,6 +122,8 @@ class DbPortTestCase(base.DbTestCase):
self.assertCountEqual(uuids, res_uuids)
def test_get_port_list_filter_by_node_project(self):
+ another_node = db_utils.create_test_node(
+ uuid=uuidutils.generate_uuid())
lessee_node = db_utils.create_test_node(uuid=uuidutils.generate_uuid(),
lessee=self.node.owner)
@@ -128,6 +135,7 @@ class DbPortTestCase(base.DbTestCase):
uuids.append(str(port.uuid))
for i in range(4, 6):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=another_node.id,
address='52:54:00:cf:2d:4%s' % i)
for i in range(7, 9):
port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
diff --git a/ironic/tests/unit/db/test_volume_connectors.py b/ironic/tests/unit/db/test_volume_connectors.py
index b15e03275..005933fa4 100644
--- a/ironic/tests/unit/db/test_volume_connectors.py
+++ b/ironic/tests/unit/db/test_volume_connectors.py
@@ -68,6 +68,7 @@ class DbVolumeConnectorTestCase(base.DbTestCase):
for i in range(1, 6):
volume_connector = db_utils.create_test_volume_connector(
uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
type='iqn',
connector_id='iqn.test-%s' % i)
uuids.append(str(volume_connector.uuid))
diff --git a/ironic/tests/unit/db/test_volume_targets.py b/ironic/tests/unit/db/test_volume_targets.py
index 62db5353f..200addf83 100644
--- a/ironic/tests/unit/db/test_volume_targets.py
+++ b/ironic/tests/unit/db/test_volume_targets.py
@@ -82,6 +82,7 @@ class DbVolumeTargetTestCase(base.DbTestCase):
for i in range(1, num):
volume_target = db_utils.create_test_volume_target(
uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
properties={"target_iqn": "iqn.test-%s" % i},
boot_index=i)
uuids.append(str(volume_target.uuid))
@@ -150,7 +151,8 @@ class DbVolumeTargetTestCase(base.DbTestCase):
def test_update_volume_target_duplicated_nodeid_and_bootindex(self):
t = db_utils.create_test_volume_target(uuid=uuidutils.generate_uuid(),
- boot_index=1)
+ boot_index=1,
+ node_id=self.node.id)
self.assertRaises(exception.VolumeTargetBootIndexAlreadyExists,
self.dbapi.update_volume_target,
t.uuid,
diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py
index 8d87345d2..9a1f28ddb 100644
--- a/ironic/tests/unit/dhcp/test_neutron.py
+++ b/ironic/tests/unit/dhcp/test_neutron.py
@@ -166,7 +166,13 @@ class TestNeutron(db_base.DbTestCase):
"ip_address": "192.168.1.3",
},
{
+ "ip_address": "192.168.1.4",
+ },
+ {
"ip_address": "2001:db8::201",
+ },
+ {
+ "ip_address": "2001:db8::202",
}
],
}
diff --git a/ironic/tests/unit/drivers/modules/network/test_noop.py b/ironic/tests/unit/drivers/modules/network/test_noop.py
index 6de7812c2..6df55ec68 100644
--- a/ironic/tests/unit/drivers/modules/network/test_noop.py
+++ b/ironic/tests/unit/drivers/modules/network/test_noop.py
@@ -39,7 +39,8 @@ class NoopInterfaceTestCase(db_base.DbTestCase):
self.interface.port_changed(task, self.port)
def test_portgroup_changed(self):
- portgroup = utils.create_test_portgroup(self.context)
+ portgroup = utils.create_test_portgroup(self.context,
+ node_id=self.node.id)
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.portgroup_changed(task, portgroup)
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
new file mode 100644
index 000000000..60c66c024
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/redfish/test_firmware_utils.py
@@ -0,0 +1,88 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic.common import exception
+from ironic.drivers.modules.redfish import firmware_utils
+from ironic.tests import base
+
+
+class FirmwareUtilsTestCase(base.TestCase):
+
+ def test_validate_update_firmware_args(self):
+ firmware_images = [
+ {
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "wait": 300
+ },
+ {
+ "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE"
+ }
+ ]
+ firmware_utils.validate_update_firmware_args(firmware_images)
+
+ def test_validate_update_firmware_args_not_list(self):
+ firmware_images = {
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "wait": 300
+ }
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "is not of type 'array'",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_unknown_key(self):
+ firmware_images = [
+ {
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "wait": 300,
+ },
+ {
+ "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE",
+ "something": "unknown"
+ }
+ ]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "'something' was unexpected",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_url_missing(self):
+ firmware_images = [
+ {
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "wait": 300,
+ },
+ {
+ "wait": 300
+ }
+ ]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue,
+ "'url' is a required property",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_url_not_string(self):
+ firmware_images = [{
+ "url": 123,
+ "wait": 300
+ }]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "123 is not of type 'string'",
+ firmware_utils.validate_update_firmware_args, firmware_images)
+
+ def test_validate_update_firmware_args_wait_not_int(self):
+ firmware_images = [{
+ "url": "http://192.0.2.10/BMC_4_22_00_00.EXE",
+ "wait": 'abc'
+ }]
+ self.assertRaisesRegex(
+ exception.InvalidParameterValue, "'abc' is not of type 'integer'",
+ firmware_utils.validate_update_firmware_args, firmware_images)
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_management.py b/ironic/tests/unit/drivers/modules/redfish/test_management.py
index 5930816ab..b46700664 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_management.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_management.py
@@ -856,6 +856,14 @@ class RedfishManagementTestCase(db_base.DbTestCase):
task.node)
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
+ def test_update_firmware_invalid_args(self):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(
+ exception.InvalidParameterValue,
+ task.driver.management.update_firmware,
+ task, [{'urlX': 'test1'}, {'url': 'test2'}])
+
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test__query_firmware_update_failed(self, mock_acquire):
driver_internal_info = {
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index 3c443c3fc..3c636a1b1 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -55,17 +55,22 @@ class InspectFunctionTestCase(db_base.DbTestCase):
port_obj1.create.assert_called_once_with()
port_obj2.create.assert_called_once_with()
+ @mock.patch.object(utils.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(utils.LOG, 'info', spec_set=True, autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test_create_ports_if_not_exist_mac_exception(self,
create_mock,
- log_mock):
+ log_mock,
+ warn_mock):
create_mock.side_effect = exception.MACAlreadyExists('f')
- macs = {'aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'}
+ macs = {'aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb',
+ 'aa:aa:aa:aa:aa:aa:bb:bb'} # WWN
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
utils.create_ports_if_not_exist(task, macs)
self.assertEqual(2, log_mock.call_count)
+ self.assertEqual(2, create_mock.call_count)
+ self.assertEqual(1, warn_mock.call_count)
@mock.patch.object(utils.LOG, 'info', spec_set=True, autospec=True)
@mock.patch.object(objects, 'Port', spec_set=True, autospec=True)
diff --git a/ironic/tests/unit/objects/test_volume_connector.py b/ironic/tests/unit/objects/test_volume_connector.py
index 380caf982..df98f2c36 100644
--- a/ironic/tests/unit/objects/test_volume_connector.py
+++ b/ironic/tests/unit/objects/test_volume_connector.py
@@ -188,8 +188,10 @@ class TestVolumeConnectorObject(db_base.DbTestCase,
self.assertEqual(self.context, c._context)
def test_save_after_refresh(self):
+ node = db_utils.create_test_node()
# Ensure that it's possible to do object.save() after object.refresh()
- db_volume_connector = db_utils.create_test_volume_connector()
+ db_volume_connector = db_utils.create_test_volume_connector(
+ node_id=node['id'])
vc = objects.VolumeConnector.get_by_uuid(self.context,
db_volume_connector.uuid)
diff --git a/ironic/tests/unit/objects/test_volume_target.py b/ironic/tests/unit/objects/test_volume_target.py
index cb57e6b39..396ceee5e 100644
--- a/ironic/tests/unit/objects/test_volume_target.py
+++ b/ironic/tests/unit/objects/test_volume_target.py
@@ -198,8 +198,10 @@ class TestVolumeTargetObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
self.assertEqual(self.context, target._context)
def test_save_after_refresh(self):
+ node = db_utils.create_test_node()
# Ensure that it's possible to do object.save() after object.refresh()
- db_volume_target = db_utils.create_test_volume_target()
+ db_volume_target = db_utils.create_test_volume_target(
+ node_id=node.id)
vt = objects.VolumeTarget.get_by_uuid(self.context,
db_volume_target.uuid)