author     Zuul <zuul@review.opendev.org>  2022-01-17 12:12:35 +0000
committer  Gerrit Code Review <review@openstack.org>  2022-01-17 12:12:35 +0000
commit     6b8e7ff34f93edb29ddc6ae305fe4ddce3760034 (patch)
tree       1efbc1b48b8a9b20cc7959022c85208f5a9d07c4 /ironic
parent     9e0e5a2af49a741d99a5c122b59183b266babc11 (diff)
parent     729c61d2cfba05baeada2b044c9e453eb84e60ff (diff)
download   ironic-6b8e7ff34f93edb29ddc6ae305fe4ddce3760034.tar.gz
Merge "Fix Redfish RAID deploy steps" into bugfix/19.0
Diffstat (limited to 'ironic')
 ironic/drivers/modules/redfish/raid.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
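
Before this change, both Redfish RAID periodics matched only a single cleaning state (CLEANFAIL for the failure check, CLEANWAIT for the status check) and the completion handler always resumed cleaning, so RAID configuration running as a deploy step was never polled and the deployment never woke up. The patch below widens the filters with provision_state_in to also match DEPLOYFAIL and DEPLOYWAIT, and resumes whichever state machine the node is actually in; a minimal sketch of the filter semantics follows the diff.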
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index a7a510811..95a8bf040 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -1016,8 +1016,8 @@ class RedfishRAID(base.RAIDInterface):
@periodics.node_periodic(
purpose='checking async RAID config failed',
spacing=CONF.redfish.raid_config_fail_interval,
- filters={'reserved': False, 'provision_state': states.CLEANFAIL,
- 'maintenance': True},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANFAIL, states.DEPLOYFAIL}, 'maintenance': True},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
@@ -1038,7 +1038,8 @@ class RedfishRAID(base.RAIDInterface):
@periodics.node_periodic(
purpose='checking async RAID config tasks',
spacing=CONF.redfish.raid_config_status_interval,
- filters={'reserved': False, 'provision_state': states.CLEANWAIT},
+ filters={'reserved': False, 'provision_state_in': {
+ states.CLEANWAIT, states.DEPLOYWAIT}},
predicate_extra_fields=['driver_internal_info'],
predicate=lambda n: n.driver_internal_info.get('raid_configs'),
)
@@ -1116,4 +1117,7 @@ class RedfishRAID(base.RAIDInterface):
self._clear_raid_configs(node)
LOG.info('RAID configuration completed for node %(node)s',
{'node': node.uuid})
- manager_utils.notify_conductor_resume_clean(task)
+ if task.node.clean_step:
+ manager_utils.notify_conductor_resume_clean(task)
+ else:
+ manager_utils.notify_conductor_resume_deploy(task)
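
A standalone sketch of the filter semantics the patch relies on (node_matches() is a hypothetical stand-in, not Ironic's periodics code; only the filter keys and state string values mirror the diff and ironic.common.states): 'provision_state' matches exactly one state, while 'provision_state_in' accepts any member of a set, letting one periodic serve both the cleaning and the deployment flows.

# Standalone sketch, not Ironic's own filter code: node_matches() is a
# hypothetical stand-in showing why the filter change matters. The state
# string values mirror ironic.common.states.
CLEANWAIT = 'clean wait'
DEPLOYWAIT = 'wait call-back'

def node_matches(node, filters):
    """Return True when the node dict satisfies the modelled filter keys."""
    if 'provision_state' in filters:
        # Old style: exactly one provision state is accepted.
        if node['provision_state'] != filters['provision_state']:
            return False
    if 'provision_state_in' in filters:
        # New style: any provision state in the given set is accepted.
        if node['provision_state'] not in filters['provision_state_in']:
            return False
    return True

node = {'provision_state': DEPLOYWAIT}
# Old filter: a node waiting in the deploy flow was skipped entirely.
print(node_matches(node, {'provision_state': CLEANWAIT}))                   # False
# New filter: cleaning and deployment are both picked up by one periodic.
print(node_matches(node, {'provision_state_in': {CLEANWAIT, DEPLOYWAIT}}))  # True

The tail of the diff applies the same split to the wake-up path: a node carries a clean_step only when the RAID work was started from cleaning, so its absence routes the notification through notify_conductor_resume_deploy instead of notify_conductor_resume_clean.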