author     Dmitry Tantsur <dtantsur@protonmail.com>   2021-10-05 14:35:22 +0200
committer  Dmitry Tantsur <dtantsur@protonmail.com>   2021-10-11 17:26:06 +0200
commit     cf1b42ea3d35d51d327e3aff0a05a9d402af0e15 (patch)
tree       df1092433ce1ef8be0cef8dec8b5dd6c84f67976 /ironic/drivers/modules/redfish
parent     7f9badb5437631fdea0f31b85aee3373f7bbb985 (diff)
Add a helper for node-based periodics
We have a very common pattern of periodic tasks that use iter_nodes to fetch
some nodes, check them, create a task and conduct some operation. This change
introduces a helper decorator for that and migrates the drivers to it.

I'm intentionally leaving unit tests intact to demonstrate that the new
decorator works exactly the same way (modulo cosmetic changes) as the
previous hand-written code.

Change-Id: Ifed4a457275d9451cc412dc80f3c09df72f50492
Story: #2009203
Task: #43522
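The helper itself is added in ironic/conductor/periodics.py and is outside this
path-limited diff. Purely as an illustrative sketch, inferred from the call
sites below rather than from the real implementation, a decorator of this shape
could wrap the removed iter_nodes/acquire/exception boilerplate roughly like
this (all names and structure here are assumptions, not the actual helper):

# Illustrative sketch only: not the actual ironic.conductor.periodics code.
# It mirrors the hand-written pattern removed below: iterate nodes, run a
# cheap pre-filter, acquire a shared task, and tolerate NodeNotFound/NodeLocked.
import functools
import types

from futurist import periodics
from oslo_log import log

from ironic.common import exception
from ironic.conductor import task_manager

LOG = log.getLogger(__name__)


def node_periodic(purpose, spacing, filters=None,
                  predicate=None, predicate_extra_fields=()):
    """Sketch of turning a per-node check into a conductor periodic task."""
    extra_fields = list(predicate_extra_fields)

    def decorator(func):
        @periodics.periodic(spacing=spacing, enabled=spacing > 0)
        @functools.wraps(func)
        def wrapper(self, manager, context):
            nodes = manager.iter_nodes(filters=filters, fields=extra_fields)
            for node_uuid, driver, conductor_group, *extra in nodes:
                if predicate is not None:
                    # Evaluate the cheap per-node predicate before creating a
                    # task, mirroring the upfront checks in the old code.
                    stub = types.SimpleNamespace(
                        **dict(zip(extra_fields, extra)))
                    if not predicate(stub):
                        continue
                try:
                    with task_manager.acquire(context, node_uuid,
                                              purpose=purpose,
                                              shared=True) as task:
                        func(self, task, manager, context)
                except exception.NodeNotFound:
                    LOG.info('Node %(node)s was not found while %(purpose)s; '
                             'presumed deleted by another process.',
                             {'node': node_uuid, 'purpose': purpose})
                except exception.NodeLocked:
                    LOG.info('Node %(node)s was locked by another process '
                             'while %(purpose)s; skipping.',
                             {'node': node_uuid, 'purpose': purpose})
        return wrapper
    return decorator

The key property of the pattern, visible in the NOTE comments being removed
below, is that the inexpensive per-node predicate runs before any task is
created, so nodes without pending firmware updates or RAID configs never cost
an extra lock or database query.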
Diffstat (limited to 'ironic/drivers/modules/redfish')
-rw-r--r--  ironic/drivers/modules/redfish/management.py   116
-rw-r--r--  ironic/drivers/modules/redfish/raid.py          115
2 files changed, 62 insertions(+), 169 deletions(-)
diff --git a/ironic/drivers/modules/redfish/management.py b/ironic/drivers/modules/redfish/management.py
index 9a68d9975..ab1a105ef 100644
--- a/ironic/drivers/modules/redfish/management.py
+++ b/ironic/drivers/modules/redfish/management.py
@@ -15,7 +15,6 @@
import collections
-from futurist import periodics
from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import importutils
@@ -29,6 +28,7 @@ from ironic.common.i18n import _
from ironic.common import indicator_states
from ironic.common import states
from ironic.common import utils
+from ironic.conductor import periodics
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
@@ -853,100 +853,46 @@ class RedfishManagement(base.ManagementInterface):
node.save()
@METRICS.timer('RedfishManagement._query_firmware_update_failed')
- @periodics.periodic(
+ @periodics.node_periodic(
+ purpose='checking if async firmware update failed',
spacing=CONF.redfish.firmware_update_fail_interval,
- enabled=CONF.redfish.firmware_update_fail_interval > 0)
- def _query_firmware_update_failed(self, manager, context):
+ filters={'reserved': False, 'provision_state': states.CLEANFAIL,
+ 'maintenance': True},
+ predicate_extra_fields=['driver_internal_info'],
+ predicate=lambda n: n.driver_internal_info.get('firmware_updates'),
+ )
+ def _query_firmware_update_failed(self, task, manager, context):
"""Periodic job to check for failed firmware updates."""
+ if not isinstance(task.driver.management, RedfishManagement):
+ return
- filters = {'reserved': False, 'provision_state': states.CLEANFAIL,
- 'maintenance': True}
+ node = task.node
- fields = ['driver_internal_info']
+ # A firmware update failed. Discard any remaining firmware
+ # updates so when the user takes the node out of
+ # maintenance mode, pending firmware updates do not
+ # automatically continue.
+ LOG.warning('Firmware update failed for node %(node)s. '
+ 'Discarding remaining firmware updates.',
+ {'node': node.uuid})
- node_list = manager.iter_nodes(fields=fields, filters=filters)
- for (node_uuid, driver, conductor_group,
- driver_internal_info) in node_list:
- try:
- firmware_updates = driver_internal_info.get(
- 'firmware_updates')
- # NOTE(TheJulia): If we don't have a entry upfront, we can
- # safely skip past the node as we know work here is not
- # required, otherwise minimizing the number of potential
- # nodes to visit.
- if not firmware_updates:
- continue
-
- lock_purpose = 'checking async firmware update failed.'
- with task_manager.acquire(context, node_uuid,
- purpose=lock_purpose,
- shared=True) as task:
- if not isinstance(task.driver.management,
- RedfishManagement):
- continue
-
- node = task.node
-
- # A firmware update failed. Discard any remaining firmware
- # updates so when the user takes the node out of
- # maintenance mode, pending firmware updates do not
- # automatically continue.
- LOG.warning('Firmware update failed for node %(node)s. '
- 'Discarding remaining firmware updates.',
- {'node': node.uuid})
-
- task.upgrade_lock()
- self._clear_firmware_updates(node)
-
- except exception.NodeNotFound:
- LOG.info('During _query_firmware_update_failed, node '
- '%(node)s was not found and presumed deleted by '
- 'another process.', {'node': node_uuid})
- except exception.NodeLocked:
- LOG.info('During _query_firmware_update_failed, node '
- '%(node)s was already locked by another process. '
- 'Skip.', {'node': node_uuid})
+ task.upgrade_lock()
+ self._clear_firmware_updates(node)
@METRICS.timer('RedfishManagement._query_firmware_update_status')
- @periodics.periodic(
+ @periodics.node_periodic(
+ purpose='checking async firmware update tasks',
spacing=CONF.redfish.firmware_update_status_interval,
- enabled=CONF.redfish.firmware_update_status_interval > 0)
- def _query_firmware_update_status(self, manager, context):
+ filters={'reserved': False, 'provision_state': states.CLEANWAIT},
+ predicate_extra_fields=['driver_internal_info'],
+ predicate=lambda n: n.driver_internal_info.get('firmware_updates'),
+ )
+ def _query_firmware_update_status(self, task, manager, context):
"""Periodic job to check firmware update tasks."""
+ if not isinstance(task.driver.management, RedfishManagement):
+ return
- filters = {'reserved': False, 'provision_state': states.CLEANWAIT}
- fields = ['driver_internal_info']
-
- node_list = manager.iter_nodes(fields=fields, filters=filters)
- for (node_uuid, driver, conductor_group,
- driver_internal_info) in node_list:
- try:
- firmware_updates = driver_internal_info.get(
- 'firmware_updates')
- # NOTE(TheJulia): Check and skip upfront before creating a
- # task so we don't generate additional tasks and db queries
- # for every node in CLEANWAIT which is not locked.
- if not firmware_updates:
- continue
-
- lock_purpose = 'checking async firmware update tasks.'
- with task_manager.acquire(context, node_uuid,
- purpose=lock_purpose,
- shared=True) as task:
- if not isinstance(task.driver.management,
- RedfishManagement):
- continue
-
- self._check_node_firmware_update(task)
-
- except exception.NodeNotFound:
- LOG.info('During _query_firmware_update_status, node '
- '%(node)s was not found and presumed deleted by '
- 'another process.', {'node': node_uuid})
- except exception.NodeLocked:
- LOG.info('During _query_firmware_update_status, node '
- '%(node)s was already locked by another process. '
- 'Skip.', {'node': node_uuid})
+ self._check_node_firmware_update(task)
@METRICS.timer('RedfishManagement._check_node_firmware_update')
def _check_node_firmware_update(self, task):
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index c01d08a9c..95052bb46 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -15,7 +15,6 @@
import math
-from futurist import periodics
from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import importutils
@@ -25,7 +24,7 @@ from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import raid
from ironic.common import states
-from ironic.conductor import task_manager
+from ironic.conductor import periodics
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
@@ -1014,98 +1013,46 @@ class RedfishRAID(base.RAIDInterface):
node.save()
@METRICS.timer('RedfishRAID._query_raid_config_failed')
- @periodics.periodic(
+ @periodics.node_periodic(
+ purpose='checking async RAID config failed',
spacing=CONF.redfish.raid_config_fail_interval,
- enabled=CONF.redfish.raid_config_fail_interval > 0)
- def _query_raid_config_failed(self, manager, context):
+ filters={'reserved': False, 'provision_state': states.CLEANFAIL,
+ 'maintenance': True},
+ predicate_extra_fields=['driver_internal_info'],
+ predicate=lambda n: n.driver_internal_info.get('raid_configs'),
+ )
+ def _query_raid_config_failed(self, task, manager, context):
"""Periodic job to check for failed RAID configuration."""
+ if not isinstance(task.driver.raid, RedfishRAID):
+ return
- filters = {'reserved': False, 'provision_state': states.CLEANFAIL,
- 'maintenance': True}
-
- fields = ['driver_internal_info']
+ node = task.node
- node_list = manager.iter_nodes(fields=fields, filters=filters)
- for (node_uuid, driver, conductor_group,
- driver_internal_info) in node_list:
- try:
- raid_configs = driver_internal_info.get(
- 'raid_configs')
- # NOTE(TheJulia): Evaluate the presence of raid configuration
- # activity before pulling the task, so we don't needlessly
- # create database queries with tasks which would be skipped
- # anyhow.
- if not raid_configs:
- continue
+ # A RAID config failed. Discard any remaining RAID
+ # configs so when the user takes the node out of
+ # maintenance mode, pending RAID configs do not
+ # automatically continue.
+ LOG.warning('RAID configuration failed for node %(node)s. '
+ 'Discarding remaining RAID configurations.',
+ {'node': node.uuid})
- lock_purpose = 'checking async RAID config failed.'
- with task_manager.acquire(context, node_uuid,
- purpose=lock_purpose,
- shared=True) as task:
- if not isinstance(task.driver.raid, RedfishRAID):
- continue
-
- node = task.node
-
- # A RAID config failed. Discard any remaining RAID
- # configs so when the user takes the node out of
- # maintenance mode, pending RAID configs do not
- # automatically continue.
- LOG.warning('RAID configuration failed for node %(node)s. '
- 'Discarding remaining RAID configurations.',
- {'node': node.uuid})
-
- task.upgrade_lock()
- self._clear_raid_configs(node)
-
- except exception.NodeNotFound:
- LOG.info('During _query_raid_config_failed, node '
- '%(node)s was not found and presumed deleted by '
- 'another process.', {'node': node_uuid})
- except exception.NodeLocked:
- LOG.info('During _query_raid_config_failed, node '
- '%(node)s was already locked by another process. '
- 'Skip.', {'node': node_uuid})
+ task.upgrade_lock()
+ self._clear_raid_configs(node)
@METRICS.timer('RedfishRAID._query_raid_config_status')
- @periodics.periodic(
+ @periodics.node_periodic(
+ purpose='checking async RAID config tasks',
spacing=CONF.redfish.raid_config_status_interval,
- enabled=CONF.redfish.raid_config_status_interval > 0)
- def _query_raid_config_status(self, manager, context):
+ filters={'reserved': False, 'provision_state': states.CLEANWAIT},
+ predicate_extra_fields=['driver_internal_info'],
+ predicate=lambda n: n.driver_internal_info.get('raid_configs'),
+ )
+ def _query_raid_config_status(self, task, manager, context):
"""Periodic job to check RAID config tasks."""
+ if not isinstance(task.driver.raid, RedfishRAID):
+ return
- filters = {'reserved': False, 'provision_state': states.CLEANWAIT}
- fields = ['driver_internal_info']
-
- node_list = manager.iter_nodes(fields=fields, filters=filters)
- for (node_uuid, driver, conductor_group,
- driver_internal_info) in node_list:
- try:
- raid_configs = driver_internal_info.get(
- 'raid_configs')
- # NOTE(TheJulia): Skip to next record if we do not
- # have raid configuraiton tasks, so we don't pull tasks
- # for every unrelated node in CLEANWAIT.
- if not raid_configs:
- continue
-
- lock_purpose = 'checking async RAID config tasks.'
- with task_manager.acquire(context, node_uuid,
- purpose=lock_purpose,
- shared=True) as task:
- if not isinstance(task.driver.raid, RedfishRAID):
- continue
-
- self._check_node_raid_config(task)
-
- except exception.NodeNotFound:
- LOG.info('During _query_raid_config_status, node '
- '%(node)s was not found and presumed deleted by '
- 'another process.', {'node': node_uuid})
- except exception.NodeLocked:
- LOG.info('During _query_raid_config_status, node '
- '%(node)s was already locked by another process. '
- 'Skip.', {'node': node_uuid})
+ self._check_node_raid_config(task)
def _get_error_messages(self, response):
try: