diff options
author | Yuriy Zveryanskyy <yzveryanskyy@mirantis.com> | 2013-11-19 14:05:36 +0200 |
---|---|---|
committer | Yuriy Zveryanskyy <yzveryanskyy@mirantis.com> | 2013-11-19 17:38:56 +0200 |
commit | 4098de219c3baf0bf92b79f9cd374ab991c54c02 (patch) | |
tree | dfbafe6472c10ee1006a61708934aec602bd3fda /ironic | |
parent | 8b016721971944a6f02079a4d096c5437d483d7d (diff) | |
download | ironic-4098de219c3baf0bf92b79f9cd374ab991c54c02.tar.gz |
Add last_error usage to deploy and teardown methods
Change I7e079627b87b2cb1606e677e287dd08dcc87263a introduced a new
'last_error' field in the nodes table. This patch adds saving of error
messages to this field in the do_node_deploy and do_node_tear_down
methods of the conductor.
Change-Id: I9aaab42c1e91568ab6a4657df4fe972441e3ac6d
Diffstat (limited to 'ironic')
-rw-r--r-- | ironic/conductor/manager.py | 104 | ||||
-rw-r--r-- | ironic/tests/conductor/test_manager.py | 44 |
2 files changed, 87 insertions, 61 deletions
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py index 8375ae675..b3fc922be 100644 --- a/ironic/conductor/manager.py +++ b/ironic/conductor/manager.py @@ -266,33 +266,44 @@ class ConductorManager(service.PeriodicService): LOG.debug(_("RPC do_node_deploy called for node %s.") % node_id) with task_manager.acquire(context, node_id, shared=False) as task: - task.driver.deploy.validate(node_obj) - if node_obj['provision_state'] is not states.NOSTATE: + node = task.node + if node['provision_state'] is not states.NOSTATE: raise exception.InstanceDeployFailure(_( "RPC do_node_deploy called for %(node)s, but provision " "state is already %(state)s.") % - {'node': node_id, 'state': node_obj['provision_state']}) + {'node': node_id, 'state': node['provision_state']}) - # set target state to expose that work is in progress - node_obj['provision_state'] = states.DEPLOYING - node_obj['target_provision_state'] = states.DEPLOYDONE - node_obj.save(context) + try: + task.driver.deploy.validate(node) + except Exception as e: + with excutils.save_and_reraise_exception(): + node['last_error'] = \ + _("Failed to validate deploy info. Error: %s") % e + else: + # set target state to expose that work is in progress + node['provision_state'] = states.DEPLOYING + node['target_provision_state'] = states.DEPLOYDONE + node['last_error'] = None + finally: + node.save(context) try: - new_state = task.driver.deploy.deploy(task, node_obj) - except Exception: + new_state = task.driver.deploy.deploy(task, node) + except Exception as e: with excutils.save_and_reraise_exception(): - node_obj['provision_state'] = states.ERROR - node_obj.save(context) - - # NOTE(deva): Some drivers may return states.DEPLOYING - # eg. if they are waiting for a callback - if new_state == states.DEPLOYDONE: - node_obj['target_provision_state'] = states.NOSTATE - node_obj['provision_state'] = states.ACTIVE + node['last_error'] = _("Failed to deploy. 
Error: %s") % e + node['provision_state'] = states.ERROR + node['target_provision_state'] = states.NOSTATE else: - node_obj['provision_state'] = new_state - node_obj.save(context) + # NOTE(deva): Some drivers may return states.DEPLOYING + # eg. if they are waiting for a callback + if new_state == states.DEPLOYDONE: + node['target_provision_state'] = states.NOSTATE + node['provision_state'] = states.ACTIVE + else: + node['provision_state'] = new_state + finally: + node.save(context) def do_node_tear_down(self, context, node_obj): """RPC method to tear down an existing node deployment. @@ -306,36 +317,47 @@ class ConductorManager(service.PeriodicService): LOG.debug(_("RPC do_node_tear_down called for node %s.") % node_id) with task_manager.acquire(context, node_id, shared=False) as task: - task.driver.deploy.validate(node_obj) - - if node_obj['provision_state'] not in [states.ACTIVE, - states.DEPLOYFAIL, - states.ERROR]: + node = task.node + if node['provision_state'] not in [states.ACTIVE, + states.DEPLOYFAIL, + states.ERROR]: raise exception.InstanceDeployFailure(_( "RCP do_node_tear_down " "not allowed for node %(node)s in state %(state)s") - % {'node': node_id, 'state': node_obj['provision_state']}) + % {'node': node_id, 'state': node['provision_state']}) - # set target state to expose that work is in progress - node_obj['provision_state'] = states.DELETING - node_obj['target_provision_state'] = states.DELETED - node_obj.save(context) + try: + task.driver.deploy.validate(node) + except Exception as e: + with excutils.save_and_reraise_exception(): + node['last_error'] = \ + ("Failed to validate info for teardown. 
Error: %s") % e + else: + # set target state to expose that work is in progress + node['provision_state'] = states.DELETING + node['target_provision_state'] = states.DELETED + node['last_error'] = None + finally: + node.save(context) try: - new_state = task.driver.deploy.tear_down(task, node_obj) - except Exception: + new_state = task.driver.deploy.tear_down(task, node) + except Exception as e: with excutils.save_and_reraise_exception(): - node_obj['provision_state'] = states.ERROR - node_obj.save(context) - - # NOTE(deva): Some drivers may return states.DELETING - # eg. if they are waiting for a callback - if new_state == states.DELETED: - node_obj['target_provision_state'] = states.NOSTATE - node_obj['provision_state'] = states.NOSTATE + node['last_error'] = \ + _("Failed to tear down. Error: %s") % e + node['provision_state'] = states.ERROR + node['target_provision_state'] = states.NOSTATE else: - node_obj['provision_state'] = new_state - node_obj.save(context) + # NOTE(deva): Some drivers may return states.DELETING + # eg. 
if they are waiting for a callback + if new_state == states.DELETED: + node['target_provision_state'] = states.NOSTATE + node['provision_state'] = states.NOSTATE + else: + node['provision_state'] = new_state + finally: + node.save(context) @periodic_task.periodic_task def _conductor_service_record_keepalive(self, context): diff --git a/ironic/tests/conductor/test_manager.py b/ironic/tests/conductor/test_manager.py index 490040e6d..e168f9be2 100644 --- a/ironic/tests/conductor/test_manager.py +++ b/ironic/tests/conductor/test_manager.py @@ -434,8 +434,10 @@ class ManagerTestCase(base.DbTestCase): self.assertRaises(exception.InstanceDeployFailure, self.service.do_node_deploy, self.context, node) - self.assertEqual(node['provision_state'], - states.ERROR) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.ERROR) + self.assertEqual(node['target_provision_state'], states.NOSTATE) + self.assertIsNotNone(node['last_error']) deploy.assert_called_once() def test_do_node_deploy_ok(self): @@ -448,10 +450,10 @@ class ManagerTestCase(base.DbTestCase): as deploy: deploy.return_value = states.DEPLOYDONE self.service.do_node_deploy(self.context, node) - self.assertEqual(node['provision_state'], - states.ACTIVE) - self.assertEqual(node['target_provision_state'], - states.NOSTATE) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.ACTIVE) + self.assertEqual(node['target_provision_state'], states.NOSTATE) + self.assertIsNone(node['last_error']) deploy.assert_called_once() def test_do_node_deploy_partial_ok(self): @@ -464,10 +466,10 @@ class ManagerTestCase(base.DbTestCase): as deploy: deploy.return_value = states.DEPLOYING self.service.do_node_deploy(self.context, node) - self.assertEqual(node['provision_state'], - states.DEPLOYING) - self.assertEqual(node['target_provision_state'], - states.DEPLOYDONE) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.DEPLOYING) + 
self.assertEqual(node['target_provision_state'], states.DEPLOYDONE) + self.assertIsNone(node['last_error']) deploy.assert_called_once() def test_do_node_tear_down_invalid_state(self): @@ -491,8 +493,10 @@ class ManagerTestCase(base.DbTestCase): self.assertRaises(exception.InstanceDeployFailure, self.service.do_node_tear_down, self.context, node) - self.assertEqual(node['provision_state'], - states.ERROR) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.ERROR) + self.assertEqual(node['target_provision_state'], states.NOSTATE) + self.assertIsNotNone(node['last_error']) deploy.assert_called_once() def test_do_node_tear_down_ok(self): @@ -505,10 +509,10 @@ class ManagerTestCase(base.DbTestCase): as deploy: deploy.return_value = states.DELETED self.service.do_node_tear_down(self.context, node) - self.assertEqual(node['provision_state'], - states.NOSTATE) - self.assertEqual(node['target_provision_state'], - states.NOSTATE) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.NOSTATE) + self.assertEqual(node['target_provision_state'], states.NOSTATE) + self.assertIsNone(node['last_error']) deploy.assert_called_once() def test_do_node_tear_down_partial_ok(self): @@ -521,8 +525,8 @@ class ManagerTestCase(base.DbTestCase): as deploy: deploy.return_value = states.DELETING self.service.do_node_tear_down(self.context, node) - self.assertEqual(node['provision_state'], - states.DELETING) - self.assertEqual(node['target_provision_state'], - states.DELETED) + node.refresh(self.context) + self.assertEqual(node['provision_state'], states.DELETING) + self.assertEqual(node['target_provision_state'], states.DELETED) + self.assertIsNone(node['last_error']) deploy.assert_called_once() |