-rw-r--r--ironic/api/controllers/v1/driver.py33
-rw-r--r--ironic/common/hash_ring.py3
-rw-r--r--ironic/conductor/manager.py97
-rw-r--r--ironic/conductor/rpcapi.py18
-rw-r--r--ironic/nova/scheduler/baremetal_host_manager.py4
-rw-r--r--ironic/nova/scheduler/ironic_host_manager.py6
-rw-r--r--ironic/nova/tests/scheduler/test_ironic_host_manager.py16
-rw-r--r--ironic/nova/tests/virt/ironic/test_driver.py54
-rw-r--r--ironic/nova/virt/ironic/client_wrapper.py36
-rw-r--r--ironic/nova/virt/ironic/driver.py126
-rw-r--r--ironic/tests/api/v1/test_drivers.py78
-rw-r--r--ironic/tests/conductor/test_manager.py123
-rw-r--r--ironic/tests/conductor/test_rpcapi.py5
-rw-r--r--ironic/tests/db/sqlalchemy/test_migrations.py9
14 files changed, 482 insertions, 126 deletions
diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py
index 8bca7b209..aa06b5bfb 100644
--- a/ironic/api/controllers/v1/driver.py
+++ b/ironic/api/controllers/v1/driver.py
@@ -25,6 +25,17 @@ from ironic.api.controllers import link
from ironic.common import exception
+# Property information for drivers:
+# key = driver name;
+# value = dictionary of properties of that driver:
+# key = property name.
+# value = description of the property.
+# NOTE(rloo). This is cached for the lifetime of the API service. If one or
+# more conductor services are restarted with new driver versions, the API
+# service should be restarted.
+_DRIVER_PROPERTIES = {}
+
+
class Driver(base.APIBase):
"""API representation of a driver."""
@@ -113,6 +124,10 @@ class DriversController(rest.RestController):
vendor_passthru = DriverPassthruController()
+ _custom_actions = {
+ 'properties': ['GET'],
+ }
+
@wsme_pecan.wsexpose(DriverList)
def get_all(self):
"""Retrieve a list of drivers.
@@ -139,3 +154,21 @@ class DriversController(rest.RestController):
return Driver.convert_with_links(name, list(hosts))
raise exception.DriverNotFound(driver_name=driver_name)
+
+ @wsme_pecan.wsexpose(wtypes.text, wtypes.text)
+ def properties(self, driver_name):
+ """Retrieve property information of the given driver.
+
+ :param driver_name: name of the driver.
+ :returns: dictionary with <property name>:<property description>
+ entries.
+ :raises: DriverNotFound (HTTP 404) if the driver name is invalid or
+ the driver cannot be loaded.
+ """
+ if driver_name not in _DRIVER_PROPERTIES:
+ topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
+ properties = pecan.request.rpcapi.get_driver_properties(
+ pecan.request.context, driver_name, topic=topic)
+ _DRIVER_PROPERTIES[driver_name] = properties
+
+ return _DRIVER_PROPERTIES[driver_name]
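
The hunk above adds a GET /v1/drivers/<name>/properties endpoint whose result is cached in the module-level _DRIVER_PROPERTIES dict for the lifetime of the API service. A minimal sketch of exercising it, assuming an Ironic API listening on the default http://localhost:6385 with no auth middleware (both are assumptions for illustration, not part of this change):

    import requests

    # Hypothetical call against a local, unauthenticated API; a real
    # deployment needs a keystone token and the deployment's endpoint.
    resp = requests.get('http://localhost:6385/v1/drivers/fake/properties')
    resp.raise_for_status()
    for prop, description in sorted(resp.json().items()):
        print('%s: %s' % (prop, description))
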
diff --git a/ironic/common/hash_ring.py b/ironic/common/hash_ring.py
index e5c12f9b9..00f272ffd 100644
--- a/ironic/common/hash_ring.py
+++ b/ironic/common/hash_ring.py
@@ -142,4 +142,5 @@ class HashRingManager(object):
try:
return self.hash_rings[driver_name]
except KeyError:
- raise exception.DriverNotFound(driver_name=driver_name)
+ raise exception.DriverNotFound(_("The driver '%s' is unknown.") %
+ driver_name)
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index d8162525d..39c231d62 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -53,6 +53,7 @@ from oslo import messaging
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import hash_ring as hash
+from ironic.common import i18n
from ironic.common import neutron
from ironic.common import states
from ironic.conductor import task_manager
@@ -60,7 +61,6 @@ from ironic.conductor import utils
from ironic.db import api as dbapi
from ironic import objects
from ironic.openstack.common import excutils
-from ironic.openstack.common.gettextutils import _LI
from ironic.openstack.common import lockutils
from ironic.openstack.common import log
from ironic.openstack.common import periodic_task
@@ -68,6 +68,9 @@ from ironic.openstack.common import periodic_task
MANAGER_TOPIC = 'ironic.conductor_manager'
WORKER_SPAWN_lOCK = "conductor_worker_spawn"
+_LW = i18n._LW
+_LI = i18n._LI
+
LOG = log.getLogger(__name__)
conductor_opts = [
@@ -130,7 +133,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
"""Ironic Conductor manager main class."""
# NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
- RPC_API_VERSION = '1.15'
+ RPC_API_VERSION = '1.16'
target = messaging.Target(version=RPC_API_VERSION)
@@ -141,11 +144,26 @@ class ConductorManager(periodic_task.PeriodicTasks):
self.topic = topic
self.power_state_sync_count = collections.defaultdict(int)
+ def _get_driver(self, driver_name):
+ """Get the driver.
+
+ :param driver_name: name of the driver.
+ :returns: the driver; an instance of a class which implements
+ :class:`ironic.drivers.base.BaseDriver`.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ try:
+ return self._driver_factory[driver_name].obj
+ except KeyError:
+ raise exception.DriverNotFound(driver_name=driver_name)
+
def init_host(self):
self.dbapi = dbapi.get_instance()
- self.driver_factory = driver_factory.DriverFactory()
- self.drivers = self.driver_factory.names
+ self._driver_factory = driver_factory.DriverFactory()
+
+ self.drivers = self._driver_factory.names
"""List of driver names which this conductor supports."""
try:
@@ -326,11 +344,7 @@ class ConductorManager(periodic_task.PeriodicTasks):
# Any locking in a top-level vendor action will need to be done by the
# implementation, as there is little we could reasonably lock on here.
LOG.debug("RPC driver_vendor_passthru for driver %s." % driver_name)
- try:
- driver = self.driver_factory[driver_name].obj
- except KeyError:
- raise exception.DriverNotFound(driver_name=driver_name)
-
+ driver = self._get_driver(driver_name)
if not getattr(driver, 'vendor', None):
raise exception.UnsupportedDriverExtension(
driver=driver_name,
@@ -340,6 +354,34 @@ class ConductorManager(periodic_task.PeriodicTasks):
method=driver_method,
**info)
+ def _provisioning_error_handler(self, e, node, context, provision_state,
+ target_provision_state):
+ """Set the node's provisioning states if error occurs.
+
+ This hook gets called upon an exception being raised when spawning
+ the worker to do the deployment or tear down of a node.
+
+ :param e: the exception object that was raised.
+ :param node: an Ironic node object.
+ :param context: security context.
+ :param provision_state: the provision state to be set on
+ the node.
+ :param target_provision_state: the target provision state to be
+ set on the node.
+
+ """
+ if isinstance(e, exception.NoFreeConductorWorker):
+ node.provision_state = provision_state
+ node.target_provision_state = target_provision_state
+ node.last_error = (_("No free conductor workers available"))
+ node.save(context)
+ LOG.warning(_LW("No free conductor workers available to perform "
+ "an action on node %(node)s, setting node's "
+ "provision_state back to %(prov_state)s and "
+ "target_provision_state to %(tgt_prov_state)s."),
+ {'node': node.uuid, 'prov_state': provision_state,
+ 'tgt_prov_state': target_provision_state})
+
@messaging.expected_exceptions(exception.NoFreeConductorWorker,
exception.NodeLocked,
exception.NodeInMaintenance,
@@ -395,11 +437,21 @@ class ConductorManager(periodic_task.PeriodicTasks):
"RPC do_node_deploy failed to validate deploy info. "
"Error: %(msg)s") % {'msg': e})
+ # Save the previous states so we can rollback the node to a
+ # consistent state in case there's no free workers to do the
+ # deploy work
+ previous_prov_state = node.provision_state
+ previous_tgt_provision_state = node.target_provision_state
+
# Set target state to expose that work is in progress
node.provision_state = states.DEPLOYING
node.target_provision_state = states.DEPLOYDONE
node.last_error = None
node.save(context)
+
+ task.set_spawn_error_hook(self._provisioning_error_handler, node,
+ context, previous_prov_state,
+ previous_tgt_provision_state)
task.spawn_after(self._spawn_worker, self._do_node_deploy,
context, task)
@@ -466,10 +518,21 @@ class ConductorManager(periodic_task.PeriodicTasks):
"RPC do_node_tear_down failed to validate deploy info. "
"Error: %(msg)s") % {'msg': e})
+ # save the previous states so we can rollback the node to a
+ # consistent state in case there's no free workers to do the
+ # tear down work
+ previous_prov_state = node.provision_state
+ previous_tgt_provision_state = node.target_provision_state
+
+ # set target state to expose that work is in progress
node.provision_state = states.DELETING
node.target_provision_state = states.DELETED
node.last_error = None
node.save(context)
+
+ task.set_spawn_error_hook(self._provisioning_error_handler, node,
+ context, previous_prov_state,
+ previous_tgt_provision_state)
task.spawn_after(self._spawn_worker, self._do_node_tear_down,
context, task)
@@ -966,3 +1029,19 @@ class ConductorManager(periodic_task.PeriodicTasks):
port_obj.save(context)
return port_obj
+
+ @messaging.expected_exceptions(exception.DriverNotFound)
+ def get_driver_properties(self, context, driver_name):
+ """Get the properties of the driver.
+
+ :param context: request context.
+ :param driver_name: name of the driver.
+ :returns: a dictionary with <property name>:<property description>
+ entries.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ LOG.debug("RPC get_driver_properties called for driver %s.",
+ driver_name)
+ driver = self._get_driver(driver_name)
+ return driver.get_properties()
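
The spawn-error hook added above records the node's previous provision states and, when the worker cannot be spawned (NoFreeConductorWorker), rolls the node back and records last_error. A self-contained sketch of the same rollback idea, using stand-in names rather than Ironic's task and node objects:

    class NoFreeWorker(Exception):
        """Stand-in for exception.NoFreeConductorWorker."""


    def start_deploy(node, spawn_worker):
        # Remember the states we are about to overwrite.
        previous = (node['provision_state'], node['target_provision_state'])
        node['provision_state'] = 'deploying'
        node['target_provision_state'] = 'deploy complete'
        try:
            spawn_worker()
        except NoFreeWorker:
            # Roll back so the node is left in a consistent state.
            node['provision_state'], node['target_provision_state'] = previous
            node['last_error'] = 'No free conductor workers available'
            raise


    def no_workers():
        raise NoFreeWorker()

    node = {'provision_state': 'available', 'target_provision_state': None,
            'last_error': None}
    try:
        start_deploy(node, no_workers)
    except NoFreeWorker:
        pass
    print(node)  # previous states restored, last_error populated
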
diff --git a/ironic/conductor/rpcapi.py b/ironic/conductor/rpcapi.py
index d2b0f885e..cbe69cf19 100644
--- a/ironic/conductor/rpcapi.py
+++ b/ironic/conductor/rpcapi.py
@@ -53,11 +53,12 @@ class ConductorAPI(object):
1.13 - Added update_port.
1.14 - Added driver_vendor_passthru.
1.15 - Added rebuild parameter to do_node_deploy.
+ 1.16 - Added get_driver_properties.
"""
# NOTE(rloo): This must be in sync with manager.ConductorManager's.
- RPC_API_VERSION = '1.15'
+ RPC_API_VERSION = '1.16'
def __init__(self, topic=None):
super(ConductorAPI, self).__init__()
@@ -311,3 +312,18 @@ class ConductorAPI(object):
"""
cctxt = self.client.prepare(topic=topic or self.topic, version='1.13')
return cctxt.call(context, 'update_port', port_obj=port_obj)
+
+ def get_driver_properties(self, context, driver_name, topic=None):
+ """Get the properties of the driver.
+
+ :param context: request context.
+ :param driver_name: name of the driver.
+ :param topic: RPC topic. Defaults to self.topic.
+ :returns: a dictionary with <property name>:<property description>
+ entries.
+ :raises: DriverNotFound.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.16')
+ return cctxt.call(context, 'get_driver_properties',
+ driver_name=driver_name)
diff --git a/ironic/nova/scheduler/baremetal_host_manager.py b/ironic/nova/scheduler/baremetal_host_manager.py
index 6d23ef18d..8aa3c643a 100644
--- a/ironic/nova/scheduler/baremetal_host_manager.py
+++ b/ironic/nova/scheduler/baremetal_host_manager.py
@@ -19,8 +19,6 @@ Manage hosts in the current zone.
"""
import ironic.nova.scheduler.base_baremetal_host_manager as bbhm
-
-from nova.openstack.common import jsonutils
from nova.scheduler import host_manager
@@ -36,7 +34,7 @@ class BaremetalHostManager(bbhm.BaseBaremetalHostManager):
"""Bare-Metal HostManager class."""
def host_state_cls(self, host, node, **kwargs):
- """Factory function/property to create a new HostState"""
+ """Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return BaremetalNodeState(host, node, **kwargs)
diff --git a/ironic/nova/scheduler/ironic_host_manager.py b/ironic/nova/scheduler/ironic_host_manager.py
index 89c7d6b66..562660be1 100644
--- a/ironic/nova/scheduler/ironic_host_manager.py
+++ b/ironic/nova/scheduler/ironic_host_manager.py
@@ -23,11 +23,9 @@ subdivided into multiple instances.
"""
from oslo.config import cfg
-import ironic.nova.scheduler.base_baremetal_host_manager as bbhm
-
-from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from ironic.nova.scheduler import base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
@@ -87,7 +85,7 @@ class IronicHostManager(bbhm.BaseBaremetalHostManager):
CONF.scheduler_default_filters = baremetal_default
def host_state_cls(self, host, node, **kwargs):
- """Factory function/property to create a new HostState"""
+ """Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return IronicNodeState(host, node, **kwargs)
diff --git a/ironic/nova/tests/scheduler/test_ironic_host_manager.py b/ironic/nova/tests/scheduler/test_ironic_host_manager.py
index 23c99a6fb..d7bdb0955 100644
--- a/ironic/nova/tests/scheduler/test_ironic_host_manager.py
+++ b/ironic/nova/tests/scheduler/test_ironic_host_manager.py
@@ -19,15 +19,14 @@ Tests For IronicHostManager
import mock
-from ironic.nova.scheduler import ironic_host_manager
-from ironic.nova.tests.scheduler import ironic_fakes
-
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler import host_manager
+from ironic.nova.scheduler import ironic_host_manager
from nova import test
+from ironic.nova.tests.scheduler import ironic_fakes
class FakeFilterClass1(filters.BaseHostFilter):
@@ -80,14 +79,15 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicHostManagerChangedNodesTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
+ ironic_driver = "nova.virt.ironic.driver.IronicDriver"
+ supported_instances = '[["i386", "baremetal", "baremetal"]]'
self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
- stats=jsonutils.dumps(dict(ironic_driver=
- "nova.virt.ironic.driver.IronicDriver",
- cpu_arch='i386')),
- supported_instances=
- '[["i386", "baremetal", "baremetal"]]',
+ stats=jsonutils.dumps(dict(
+ ironic_driver=ironic_driver,
+ cpu_arch='i386')),
+ supported_instances=supported_instances,
free_disk_gb=10, free_ram_mb=1024)
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
diff --git a/ironic/nova/tests/virt/ironic/test_driver.py b/ironic/nova/tests/virt/ironic/test_driver.py
index 1699fd525..503231ab4 100644
--- a/ironic/nova/tests/virt/ironic/test_driver.py
+++ b/ironic/nova/tests/virt/ironic/test_driver.py
@@ -46,7 +46,6 @@ from nova.virt import firewall
CONF = cfg.CONF
IRONIC_FLAGS = dict(
- instance_type_extra_specs=['test_spec:test_value'],
api_version=1,
group='ironic',
)
@@ -76,8 +75,7 @@ def _get_properties():
def _get_stats():
return {'cpu_arch': 'x86_64',
'ironic_driver':
- 'ironic.nova.virt.ironic.driver.IronicDriver',
- 'test_spec': 'test_value'}
+ 'ironic.nova.virt.ironic.driver.IronicDriver'}
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@@ -107,7 +105,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertEqual(self.driver.get_hypervisor_version(), 1)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test_validate_instance_and_node(self, mock_gbiui):
+ def test__validate_instance_and_node(self, mock_gbiui):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
instance_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
@@ -117,18 +115,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
icli = cw.IronicClientWrapper()
mock_gbiui.return_value = node
- result = ironic_driver.validate_instance_and_node(icli, instance)
+ result = ironic_driver._validate_instance_and_node(icli, instance)
self.assertEqual(result.uuid, node_uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test_validate_instance_and_node_failed(self, mock_gbiui):
+ def test__validate_instance_and_node_failed(self, mock_gbiui):
icli = cw.IronicClientWrapper()
mock_gbiui.side_effect = ironic_exception.NotFound()
instance_uuid = uuidutils.generate_uuid(),
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=instance_uuid)
self.assertRaises(exception.InstanceNotFound,
- ironic_driver.validate_instance_and_node,
+ ironic_driver._validate_instance_and_node,
icli, instance)
def test__node_resource(self):
@@ -631,7 +629,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
- self.assertRaises(exception.InstanceDeployFailure,
+ self.assertRaises(ironic_exception.BadRequest,
self.driver.spawn,
self.ctx, instance, None, [], None)
@@ -642,6 +640,37 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_cleanup_deploy.assert_called_once_with(node, instance, None)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(flavor_obj.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ fake_net_info = utils.get_test_network_info()
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ fake_flavor = {'ephemeral_gb': 0}
+ mock_flavor.return_value = fake_flavor
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ fake_looping_call.wait.side_effect = ironic_exception.BadRequest
+ fake_net_info = utils.get_test_network_info()
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn,
+ self.ctx, instance, None, [], None, fake_net_info)
+ mock_destroy.assert_called_once_with(self.ctx, instance,
+ fake_net_info)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(instance_obj.Instance, 'save')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(flavor_obj.Flavor, 'get_by_id')
@@ -705,7 +734,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_cleanup_deploy.assert_called_with(node, instance, network_info)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
- @mock.patch.object(ironic_driver, 'validate_instance_and_node')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
@@ -749,7 +778,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
- @mock.patch.object(ironic_driver, 'validate_instance_and_node')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test_reboot(self, mock_val_inst, mock_set_power):
node = ironic_utils.get_test_node()
mock_val_inst.return_value = node
@@ -758,7 +787,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.reboot(self.ctx, instance, None, None)
mock_set_power.assert_called_once_with(node.uuid, 'reboot')
- @mock.patch.object(ironic_driver, 'validate_instance_and_node')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_off(self, mock_sp, fake_validate):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
@@ -772,7 +801,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.power_off(instance)
mock_sp.assert_called_once_with(node_uuid, 'off')
- @mock.patch.object(ironic_driver, 'validate_instance_and_node')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_on(self, mock_sp, fake_validate):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
@@ -982,7 +1011,6 @@ class IronicDriverTestCase(test.NoDBTestCase):
node=node_uuid,
instance_type_id=flavor_id)
-
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
diff --git a/ironic/nova/virt/ironic/client_wrapper.py b/ironic/nova/virt/ironic/client_wrapper.py
index cfda09e40..7c5391867 100644
--- a/ironic/nova/virt/ironic/client_wrapper.py
+++ b/ironic/nova/virt/ironic/client_wrapper.py
@@ -17,14 +17,15 @@
import time
-from ironicclient import client as ironic_client
-from ironicclient import exc as ironic_exception
-
from nova import exception
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from oslo.config import cfg
+
+ironic = None
+
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -32,6 +33,23 @@ CONF = cfg.CONF
class IronicClientWrapper(object):
"""Ironic client wrapper class that encapsulates retry logic."""
+ def __init__(self):
+ """Initialise the IronicClientWrapper for use.
+
+ Initialise IronicClientWrapper by loading ironicclient
+ dynamically so that ironicclient is not a dependency for
+ Nova.
+ """
+ global ironic
+ if ironic is None:
+ ironic = importutils.import_module('ironicclient')
+ # NOTE(deva): work around a lack of symbols in the current version.
+ if not hasattr(ironic, 'exc'):
+ ironic.exc = importutils.import_module('ironicclient.exc')
+ if not hasattr(ironic, 'client'):
+ ironic.client = importutils.import_module(
+ 'ironicclient.client')
+
def _get_client(self):
# TODO(deva): save and reuse existing client & auth token
# until it expires or is no longer valid
@@ -48,9 +66,9 @@ class IronicClientWrapper(object):
'ironic_url': CONF.ironic.api_endpoint}
try:
- cli = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
- except ironic_exception.Unauthorized:
- msg = (_("Unable to authenticate Ironic client."))
+ cli = ironic.client.get_client(CONF.ironic.api_version, **kwargs)
+ except ironic.exc.Unauthorized:
+ msg = _("Unable to authenticate Ironic client.")
LOG.error(msg)
raise exception.NovaException(msg)
@@ -78,9 +96,9 @@ class IronicClientWrapper(object):
:raises: NovaException if all retries failed.
"""
- retry_excs = (ironic_exception.ServiceUnavailable,
- ironic_exception.ConnectionRefused,
- ironic_exception.Conflict)
+ retry_excs = (ironic.exc.ServiceUnavailable,
+ ironic.exc.ConnectionRefused,
+ ironic.exc.Conflict)
num_attempts = CONF.ironic.api_max_retries
for attempt in range(1, num_attempts + 1):
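
The wrapper above loads ironicclient lazily through Nova's importutils so the module can be imported even when ironicclient is not installed. A rough equivalent of the same pattern using only the standard library (importlib here is a substitution for importutils, not what the change itself uses):

    import importlib

    _ironic = None


    def _load_ironicclient():
        """Import ironicclient on first use and memoise the module."""
        global _ironic
        if _ironic is None:
            _ironic = importlib.import_module('ironicclient')
            # Some releases do not re-export these submodules from the
            # package, so attach them explicitly when they are missing.
            if not hasattr(_ironic, 'exc'):
                _ironic.exc = importlib.import_module('ironicclient.exc')
            if not hasattr(_ironic, 'client'):
                _ironic.client = importlib.import_module('ironicclient.client')
        return _ironic
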
diff --git a/ironic/nova/virt/ironic/driver.py b/ironic/nova/virt/ironic/driver.py
index 1ac7d4eb0..043bcaeaf 100644
--- a/ironic/nova/virt/ironic/driver.py
+++ b/ironic/nova/virt/ironic/driver.py
@@ -22,7 +22,6 @@ bare metal resources.
"""
import logging as py_logging
-from ironicclient import exc as ironic_exception
from oslo.config import cfg
from ironic.nova.virt.ironic import client_wrapper
@@ -35,13 +34,17 @@ from nova import exception
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _, _LW
+from nova.openstack.common.gettextutils import _, _LE, _LW
+from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver as virt_driver
from nova.virt import firewall
+
+ironic = None
+
LOG = logging.getLogger(__name__)
opts = [
@@ -62,18 +65,8 @@ opts = [
help='Log level override for ironicclient. Set this in '
'order to override the global "default_log_levels", '
'"verbose", and "debug" settings.'),
- cfg.StrOpt('pxe_bootfile_name',
- help='This gets passed to Neutron as the bootfile dhcp '
- 'parameter when the dhcp_options_enabled is set.',
- default='pxelinux.0'),
cfg.StrOpt('admin_tenant_name',
help='Ironic keystone tenant name.'),
- cfg.ListOpt('instance_type_extra_specs',
- default=[],
- help='A list of additional capabilities corresponding to '
- 'instance_type_extra_specs for this compute '
- 'host to advertise. Valid entries are name=value, pairs '
- 'For example, "key1:val1, key2:val2"'),
cfg.IntOpt('api_max_retries',
default=60,
help=('How many retries when a request does conflict.')),
@@ -104,15 +97,15 @@ def map_power_state(state):
try:
return _POWER_STATE_MAP[state]
except KeyError:
- LOG.warning(_("Power state %s not found.") % state)
+ LOG.warning(_LW("Power state %s not found."), state)
return power_state.NOSTATE
-def validate_instance_and_node(icli, instance):
+def _validate_instance_and_node(icli, instance):
"""Get and validate a node's uuid out of a manager instance dict.
- The compute manager is meant to know the node uuid, so missing uuid
- a significant issue - it may mean we've been passed someone elses data.
+ The compute manager is meant to know the node uuid, so missing uuid is
+ a significant issue - it may mean we've been passed someone else's data.
Check with the Ironic service that this node is still associated with
this instance. This catches situations where Nova's instance dict
@@ -121,7 +114,7 @@ def validate_instance_and_node(icli, instance):
"""
try:
return icli.call("node.get_by_instance_uuid", instance['uuid'])
- except ironic_exception.NotFound:
+ except ironic.exc.NotFound:
raise exception.InstanceNotFound(instance_id=instance['uuid'])
@@ -152,17 +145,20 @@ class IronicDriver(virt_driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(IronicDriver, self).__init__(virtapi)
+ global ironic
+ if ironic is None:
+ ironic = importutils.import_module('ironicclient')
+ # NOTE(deva): work around a lack of symbols in the current version.
+ if not hasattr(ironic, 'exc'):
+ ironic.exc = importutils.import_module('ironicclient.exc')
+ if not hasattr(ironic, 'client'):
+ ironic.client = importutils.import_module(
+ 'ironicclient.client')
self.firewall_driver = firewall.load_driver(default=_FIREWALL_DRIVER)
extra_specs = {}
extra_specs["ironic_driver"] = \
"ironic.nova.virt.ironic.driver.IronicDriver"
- for pair in CONF.ironic.instance_type_extra_specs:
- keyval = pair.split(':', 1)
- keyval[0] = keyval[0].strip()
- keyval[1] = keyval[1].strip()
- extra_specs[keyval[0]] = keyval[1]
-
self.extra_specs = extra_specs
icli_log_level = CONF.ironic.client_log_level
@@ -172,7 +168,9 @@ class IronicDriver(virt_driver.ComputeDriver):
logger.setLevel(level)
def _node_resources_unavailable(self, node_obj):
- """Determines whether the node's resources should be presented
+ """Determine whether the node's resources are in an acceptable state.
+
+ Determines whether the node's resources should be presented
to Nova for use based on the current power and maintenance state.
"""
bad_states = [ironic_states.ERROR, ironic_states.NOSTATE]
@@ -275,7 +273,7 @@ class IronicDriver(virt_driver.ComputeDriver):
'value': instance['uuid']})
try:
icli.call('node.update', node.uuid, patch)
- except ironic_exception.BadRequest:
+ except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
% {'node': node.uuid, 'instance': instance['uuid']})
@@ -294,12 +292,11 @@ class IronicDriver(virt_driver.ComputeDriver):
patch.append({'op': 'remove', 'path': '/instance_uuid'})
try:
icli.call('node.update', node.uuid, patch)
- except ironic_exception.BadRequest:
- msg = (_("Failed clean up the parameters on node %(node)s "
- "when unprovisioning the instance %(instance)s")
- % {'node': node.uuid, 'instance': instance['uuid']})
- LOG.error(msg)
- reason = _("Fail to clean up node %s parameters") % node.uuid
+ except ironic.exc.BadRequest:
+ LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
+ "when unprovisioning the instance %(instance)s"),
+ {'node': node.uuid, 'instance': instance['uuid']})
+ reason = (_("Fail to clean up node %s parameters") % node.uuid)
raise exception.InstanceTerminationFailure(reason=reason)
self._unplug_vifs(node, instance, network_info)
@@ -307,7 +304,7 @@ class IronicDriver(virt_driver.ComputeDriver):
def _wait_for_active(self, icli, instance):
"""Wait for the node to be marked as ACTIVE in Ironic."""
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
if node.provision_state == ironic_states.ACTIVE:
# job is done
LOG.debug("Ironic node %(node)s is now ACTIVE",
@@ -358,7 +355,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
icli = client_wrapper.IronicClientWrapper()
try:
- validate_instance_and_node(icli, instance)
+ _validate_instance_and_node(icli, instance)
return True
except exception.InstanceNotFound:
return False
@@ -397,7 +394,7 @@ class IronicDriver(virt_driver.ComputeDriver):
try:
icli.call("node.get", nodename)
return True
- except ironic_exception.NotFound:
+ except ironic.exc.NotFound:
return False
def get_available_nodes(self, refresh=False):
@@ -447,7 +444,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
icli = client_wrapper.IronicClientWrapper()
try:
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
except exception.InstanceNotFound:
return {'state': map_power_state(ironic_states.NOSTATE),
'max_mem': 0,
@@ -477,7 +474,7 @@ class IronicDriver(virt_driver.ComputeDriver):
icli = client_wrapper.IronicClientWrapper()
try:
node = icli.call("node.get", instance['node'])
- except ironic_exception.NotFound:
+ except ironic.exc.NotFound:
return []
ports = icli.call("node.list_ports", node.uuid)
return [p.address for p in ports]
@@ -539,8 +536,8 @@ class IronicDriver(virt_driver.ComputeDriver):
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error preparing deploy for instance %(instance)s "
- "on baremetal node %(node)s.") %
+ LOG.error(_LE("Error preparing deploy for instance "
+ "%(instance)s on baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': node_uuid})
self._cleanup_deploy(node, instance, network_info)
@@ -549,26 +546,25 @@ class IronicDriver(virt_driver.ComputeDriver):
try:
icli.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE)
- except (exception.NovaException, # Retry failed
- ironic_exception.InternalServerError, # Validations
- ironic_exception.BadRequest) as e: # Maintenance
- msg = (_("Failed to request Ironic to provision instance "
- "%(inst)s: %(reason)s") % {'inst': instance['uuid'],
- 'reason': str(e)})
- LOG.error(msg)
- self._cleanup_deploy(node, instance, network_info)
- raise exception.InstanceDeployFailure(msg)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ msg = (_("Failed to request Ironic to provision instance "
+ "%(inst)s: %(reason)s") % {'inst': instance['uuid'],
+ 'reason': str(e)})
+ LOG.error(msg)
+ self._cleanup_deploy(node, instance, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
icli, instance)
+
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
- except exception.InstanceDeployFailure:
+ except Exception:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error deploying instance %(instance)s on "
- "baremetal node %(node)s.") %
- {'instance': instance['uuid'],
- 'node': node_uuid})
+ LOG.error(_LE("Error deploying instance %(instance)s on "
+ "baremetal node %(node)s."),
+ {'instance': instance['uuid'],
+ 'node': node_uuid})
self.destroy(context, instance, network_info)
def _unprovision(self, icli, instance, node):
@@ -590,7 +586,7 @@ class IronicDriver(virt_driver.ComputeDriver):
data = {'tries': 0}
def _wait_for_provision_state():
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
if not node.provision_state:
LOG.debug("Ironic node %(node)s is now unprovisioned",
dict(node=node.uuid), instance=instance)
@@ -627,7 +623,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
icli = client_wrapper.IronicClientWrapper()
try:
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance['uuid'])
@@ -662,7 +658,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
icli = client_wrapper.IronicClientWrapper()
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'reboot')
def power_off(self, instance):
@@ -673,7 +669,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# TODO(nobodycam): check the current power state first.
icli = client_wrapper.IronicClientWrapper()
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'off')
def power_on(self, context, instance, network_info,
@@ -690,7 +686,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# TODO(nobodycam): check the current power state first.
icli = client_wrapper.IronicClientWrapper()
- node = validate_instance_and_node(icli, instance)
+ node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'on')
def get_host_stats(self, refresh=False):
@@ -766,8 +762,9 @@ class IronicDriver(virt_driver.ComputeDriver):
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
- LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s"
- % {'uuid': instance['uuid'], 'network_info': network_info})
+ LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
+ {'uuid': instance['uuid'],
+ 'network_info': network_info})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
@@ -794,8 +791,9 @@ class IronicDriver(virt_driver.ComputeDriver):
icli.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
- LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s"
- % {'uuid': instance['uuid'], 'network_info': network_info})
+ LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
+ {'uuid': instance['uuid'],
+ 'network_info': network_info})
if network_info and len(network_info) > 0:
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
@@ -806,7 +804,7 @@ class IronicDriver(virt_driver.ComputeDriver):
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
icli.call("port.update", pif.uuid, patch)
- except ironic_exception.BadRequest:
+ except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
@@ -891,8 +889,8 @@ class IronicDriver(virt_driver.ComputeDriver):
icli.call("node.set_provision_state",
node_uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
- ironic_exception.InternalServerError, # Validations
- ironic_exception.BadRequest) as e: # Maintenance
+ ironic.exc.InternalServerError, # Validations
+ ironic.exc.BadRequest) as e: # Maintenance
msg = (_("Failed to request Ironic to rebuild instance "
"%(inst)s: %(reason)s") % {'inst': instance['uuid'],
'reason': str(e)})
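
The spawn path above now cleans up (or destroys) the instance and then re-raises whatever the provision request or the wait loop raised, rather than wrapping everything in InstanceDeployFailure. A bare sketch of that "clean up, then surface the original error" shape, using plain try/except in place of oslo's excutils.save_and_reraise_exception for brevity:

    def provision(request_deploy, wait_for_active, cleanup, destroy):
        """Hypothetical outline of the deploy error handling above."""
        try:
            request_deploy()           # e.g. set the provision state to ACTIVE
        except Exception:
            cleanup()                  # undo deploy preparation
            raise                      # caller sees the original exception
        try:
            wait_for_active()          # poll until the node goes ACTIVE
        except Exception:
            destroy()                  # tear the instance back down
            raise
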
diff --git a/ironic/tests/api/v1/test_drivers.py b/ironic/tests/api/v1/test_drivers.py
index 1d2819c3d..bea09e382 100644
--- a/ironic/tests/api/v1/test_drivers.py
+++ b/ironic/tests/api/v1/test_drivers.py
@@ -17,6 +17,8 @@ import json
import mock
from testtools.matchers import HasLength
+from ironic.api.controllers.v1 import driver
+from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.api import base
@@ -47,11 +49,11 @@ class TestListDrivers(base.FunctionalTest):
self.assertThat(data['drivers'], HasLength(2))
drivers = sorted(data['drivers'])
for i in range(len(expected)):
- driver = drivers[i]
- self.assertEqual(expected[i]['name'], driver['name'])
- self.assertEqual(expected[i]['hosts'], driver['hosts'])
- self.validate_link(driver['links'][0]['href'])
- self.validate_link(driver['links'][1]['href'])
+ d = drivers[i]
+ self.assertEqual(expected[i]['name'], d['name'])
+ self.assertEqual(expected[i]['hosts'], d['hosts'])
+ self.validate_link(d['links'][0]['href'])
+ self.validate_link(d['links'][1]['href'])
def test_drivers_no_active_conductor(self):
data = self.get_json('/drivers')
@@ -103,3 +105,69 @@ class TestListDrivers(base.FunctionalTest):
error = json.loads(response.json['error_message'])
self.assertEqual('Missing argument: "method"',
error['faultstring'])
+
+
+@mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
+@mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for_driver')
+class TestDriverProperties(base.FunctionalTest):
+
+ def test_driver_properties_fake(self, mock_topic, mock_properties):
+ # Can get driver properties for fake driver.
+ driver._DRIVER_PROPERTIES = {}
+ driver_name = 'fake'
+ mock_topic.return_value = 'fake_topic'
+ mock_properties.return_value = {'prop1': 'Property 1. Required.'}
+ data = self.get_json('/drivers/%s/properties' % driver_name)
+ self.assertEqual(mock_properties.return_value, data)
+ mock_topic.assert_called_once_with(driver_name)
+ mock_properties.assert_called_once_with(mock.ANY, driver_name,
+ topic=mock_topic.return_value)
+ self.assertEqual(mock_properties.return_value,
+ driver._DRIVER_PROPERTIES[driver_name])
+
+ def test_driver_properties_cached(self, mock_topic, mock_properties):
+ # only one RPC-conductor call will be made and the info cached
+ # for subsequent requests
+ driver._DRIVER_PROPERTIES = {}
+ driver_name = 'fake'
+ mock_topic.return_value = 'fake_topic'
+ mock_properties.return_value = {'prop1': 'Property 1. Required.'}
+ data = self.get_json('/drivers/%s/properties' % driver_name)
+ data = self.get_json('/drivers/%s/properties' % driver_name)
+ data = self.get_json('/drivers/%s/properties' % driver_name)
+ self.assertEqual(mock_properties.return_value, data)
+ mock_topic.assert_called_once_with(driver_name)
+ mock_properties.assert_called_once_with(mock.ANY, driver_name,
+ topic=mock_topic.return_value)
+ self.assertEqual(mock_properties.return_value,
+ driver._DRIVER_PROPERTIES[driver_name])
+
+ def test_driver_properties_invalid_driver_name(self, mock_topic,
+ mock_properties):
+ # Cannot get driver properties for an invalid driver; no RPC topic
+ # exists for it.
+ driver._DRIVER_PROPERTIES = {}
+ driver_name = 'bad_driver'
+ mock_topic.side_effect = exception.DriverNotFound(
+ driver_name=driver_name)
+ mock_properties.return_value = {'prop1': 'Property 1. Required.'}
+ ret = self.get_json('/drivers/%s/properties' % driver_name,
+ expect_errors=True)
+ self.assertEqual(404, ret.status_int)
+ mock_topic.assert_called_once_with(driver_name)
+ self.assertFalse(mock_properties.called)
+
+ def test_driver_properties_cannot_load(self, mock_topic, mock_properties):
+ # Cannot get driver properties for the driver. Although an RPC topic
+ # exists for it, the conductor wasn't able to load it.
+ driver._DRIVER_PROPERTIES = {}
+ driver_name = 'driver'
+ mock_topic.return_value = 'driver_topic'
+ mock_properties.side_effect = exception.DriverNotFound(
+ driver_name=driver_name)
+ ret = self.get_json('/drivers/%s/properties' % driver_name,
+ expect_errors=True)
+ self.assertEqual(404, ret.status_int)
+ mock_topic.assert_called_once_with(driver_name)
+ mock_properties.assert_called_once_with(mock.ANY, driver_name,
+ topic=mock_topic.return_value)
diff --git a/ironic/tests/conductor/test_manager.py b/ironic/tests/conductor/test_manager.py
index 2e3c20e3f..c92c47d36 100644
--- a/ironic/tests/conductor/test_manager.py
+++ b/ironic/tests/conductor/test_manager.py
@@ -235,6 +235,16 @@ class ManagerTestCase(tests_db_base.DbTestCase):
self.service._conductor_service_record_keepalive()
mock_touch.assert_called_once_with(self.hostname)
+ def test_get_driver_known(self):
+ self._start_service()
+ driver = self.service._get_driver('fake')
+ self.assertTrue(isinstance(driver, drivers_base.BaseDriver))
+
+ def test_get_driver_unknown(self):
+ self._start_service()
+ self.assertRaises(exception.DriverNotFound,
+ self.service._get_driver, 'unknown_driver')
+
def test_change_node_power_state_power_on(self):
# Test change_node_power_state including integration with
# conductor.utils.node_power_action and lower.
@@ -720,7 +730,12 @@ class ManagerTestCase(tests_db_base.DbTestCase):
mock_spawn.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
def test_do_node_deploy_worker_pool_full(self):
- node = obj_utils.create_test_node(self.context, driver='fake')
+ prv_state = states.NOSTATE
+ tgt_prv_state = states.NOSTATE
+ node = obj_utils.create_test_node(self.context,
+ provision_state=prv_state,
+ target_provision_state=tgt_prv_state,
+ last_error=None, driver='fake')
self._start_service()
with mock.patch.object(self.service, '_spawn_worker') as mock_spawn:
@@ -733,8 +748,10 @@ class ManagerTestCase(tests_db_base.DbTestCase):
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self.service._worker_pool.waitall()
node.refresh()
- # This is a sync operation last_error should be None.
- self.assertIsNone(node.last_error)
+ # Make sure things were rolled back
+ self.assertEqual(prv_state, node.provision_state)
+ self.assertEqual(tgt_prv_state, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@@ -818,10 +835,14 @@ class ManagerTestCase(tests_db_base.DbTestCase):
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker')
def test_do_node_tear_down_worker_pool_full(self, mock_spawn):
+ prv_state = states.ACTIVE
+ tgt_prv_state = states.NOSTATE
fake_instance_info = {'foo': 'bar'}
node = obj_utils.create_test_node(self.context, driver='fake',
- provision_state=states.ACTIVE,
- instance_info=fake_instance_info)
+ provision_state=prv_state,
+ target_provision_state=tgt_prv_state,
+ instance_info=fake_instance_info,
+ last_error=None)
self._start_service()
mock_spawn.side_effect = exception.NoFreeConductorWorker()
@@ -833,10 +854,12 @@ class ManagerTestCase(tests_db_base.DbTestCase):
self.assertEqual(exception.NoFreeConductorWorker, exc.exc_info[0])
self.service._worker_pool.waitall()
node.refresh()
- # This is a sync operation last_error should be None.
- self.assertIsNone(node.last_error)
# Assert instance_info was not touched
self.assertEqual(fake_instance_info, node.instance_info)
+ # Make sure things were rolled back
+ self.assertEqual(prv_state, node.provision_state)
+ self.assertEqual(tgt_prv_state, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
@@ -1858,3 +1881,89 @@ class ManagerCheckDeployTimeoutsTestCase(_CommonMixIn, tests_base.TestCase):
self.task)
self.assertEqual([spawn_after_call] * 2,
self.task.spawn_after.call_args_list)
+
+
+class ManagerTestProperties(tests_db_base.DbTestCase):
+
+ def setUp(self):
+ super(ManagerTestProperties, self).setUp()
+ self.service = manager.ConductorManager('test-host', 'test-topic')
+ self.context = context.get_admin_context()
+
+ def _check_driver_properties(self, driver, expected):
+ mgr_utils.mock_the_extension_manager(driver=driver)
+ self.driver = driver_factory.get_driver(driver)
+ self.service.init_host()
+ properties = self.service.get_driver_properties(self.context, driver)
+ self.assertEqual(sorted(expected), sorted(properties.keys()))
+
+ def test_driver_properties_fake(self):
+ expected = ['A1', 'A2', 'B1', 'B2']
+ self._check_driver_properties("fake", expected)
+
+ def test_driver_properties_fake_ipmitool(self):
+ expected = ['ipmi_address', 'ipmi_terminal_port',
+ 'ipmi_password', 'ipmi_priv_level',
+ 'ipmi_username']
+ self._check_driver_properties("fake_ipmitool", expected)
+
+ def test_driver_properties_fake_ipminative(self):
+ expected = ['ipmi_address', 'ipmi_password', 'ipmi_username']
+ self._check_driver_properties("fake_ipminative", expected)
+
+ def test_driver_properties_fake_ssh(self):
+ expected = ['ssh_address', 'ssh_username', 'ssh_virt_type',
+ 'ssh_key_contents', 'ssh_key_filename',
+ 'ssh_password', 'ssh_port']
+ self._check_driver_properties("fake_ssh", expected)
+
+ def test_driver_properties_fake_pxe(self):
+ expected = ['pxe_deploy_kernel', 'pxe_deploy_ramdisk']
+ self._check_driver_properties("fake_pxe", expected)
+
+ def test_driver_properties_fake_seamicro(self):
+ expected = ['seamicro_api_endpoint', 'seamicro_password',
+ 'seamicro_server_id', 'seamicro_username',
+ 'seamicro_api_version']
+ self._check_driver_properties("fake_seamicro", expected)
+
+ def test_driver_properties_pxe_ipmitool(self):
+ expected = ['ipmi_address', 'ipmi_terminal_port',
+ 'pxe_deploy_kernel', 'pxe_deploy_ramdisk',
+ 'ipmi_password', 'ipmi_priv_level',
+ 'ipmi_username']
+ self._check_driver_properties("pxe_ipmitool", expected)
+
+ def test_driver_properties_pxe_ipminative(self):
+ expected = ['ipmi_address', 'ipmi_password', 'ipmi_username',
+ 'pxe_deploy_kernel', 'pxe_deploy_ramdisk']
+ self._check_driver_properties("pxe_ipminative", expected)
+
+ def test_driver_properties_pxe_ssh(self):
+ expected = ['pxe_deploy_kernel', 'pxe_deploy_ramdisk',
+ 'ssh_address', 'ssh_username', 'ssh_virt_type',
+ 'ssh_key_contents', 'ssh_key_filename',
+ 'ssh_password', 'ssh_port']
+ self._check_driver_properties("pxe_ssh", expected)
+
+ def test_driver_properties_pxe_seamicro(self):
+ expected = ['pxe_deploy_kernel', 'pxe_deploy_ramdisk',
+ 'seamicro_api_endpoint', 'seamicro_password',
+ 'seamicro_server_id', 'seamicro_username',
+ 'seamicro_api_version']
+ self._check_driver_properties("pxe_seamicro", expected)
+
+ def test_driver_properties_ilo(self):
+ expected = ['ilo_address', 'ilo_username', 'ilo_password',
+ 'client_port', 'client_timeout']
+ self._check_driver_properties("ilo", expected)
+
+ def test_driver_properties_fail(self):
+ mgr_utils.mock_the_extension_manager()
+ self.driver = driver_factory.get_driver("fake")
+ self.service.init_host()
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.get_driver_properties,
+ self.context, "bad-driver")
+ # Compare true exception hidden by @messaging.expected_exceptions
+ self.assertEqual(exception.DriverNotFound, exc.exc_info[0])
diff --git a/ironic/tests/conductor/test_rpcapi.py b/ironic/tests/conductor/test_rpcapi.py
index da1580e85..3c6951d4b 100644
--- a/ironic/tests/conductor/test_rpcapi.py
+++ b/ironic/tests/conductor/test_rpcapi.py
@@ -221,3 +221,8 @@ class RPCAPITestCase(base.DbTestCase):
'call',
version='1.13',
port_obj=fake_port)
+
+ def test_get_driver_properties(self):
+ self._test_rpcapi('get_driver_properties',
+ 'call',
+ driver_name='fake-driver')
diff --git a/ironic/tests/db/sqlalchemy/test_migrations.py b/ironic/tests/db/sqlalchemy/test_migrations.py
index 8a643f3fc..3dde3a8ac 100644
--- a/ironic/tests/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/db/sqlalchemy/test_migrations.py
@@ -42,6 +42,7 @@ import contextlib
from alembic import script
import mock
+from oslo.db import exception
from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import utils as db_utils
import sqlalchemy
@@ -307,8 +308,12 @@ class MigrationCheckersMixin(object):
'instance_uuid': instance_uuid}
nodes.insert().values(data).execute()
data['uuid'] = utils.generate_uuid()
- self.assertRaises(sqlalchemy.exc.IntegrityError,
- nodes.insert().execute, data)
+ # TODO(viktors): Remove check on sqlalchemy.exc.IntegrityError, when
+ # Ironic will use oslo.db 0.4.0 or higher.
+ # See bug #1214341 for details.
+ self.assertRaises(
+ (sqlalchemy.exc.IntegrityError, exception.DBDuplicateEntry),
+ nodes.insert().execute, data)
class TestMigrationsMySQL(MigrationCheckersMixin,