summaryrefslogtreecommitdiff
path: root/ironic
diff options
context:
space:
mode:
Diffstat (limited to 'ironic')
-rw-r--r--ironic/cmd/dbsync.py2
-rw-r--r--ironic/common/exception.py12
-rw-r--r--ironic/common/images.py17
-rw-r--r--ironic/common/pxe_utils.py24
-rw-r--r--ironic/common/raid.py48
-rw-r--r--ironic/common/release_mappings.py3
-rw-r--r--ironic/conductor/manager.py5
-rw-r--r--ironic/conductor/task_manager.py28
-rw-r--r--ironic/db/api.py79
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py56
-rw-r--r--ironic/db/sqlalchemy/api.py215
-rw-r--r--ironic/db/sqlalchemy/models.py26
-rw-r--r--ironic/drivers/ilo.py13
-rw-r--r--ironic/drivers/modules/agent.py30
-rw-r--r--ironic/drivers/modules/deploy_utils.py19
-rw-r--r--ironic/drivers/modules/ilo/raid.py235
-rw-r--r--ironic/objects/__init__.py1
-rw-r--r--ironic/objects/allocation.py300
-rw-r--r--ironic/objects/node.py9
-rw-r--r--ironic/tests/unit/api/utils.py1
-rw-r--r--ironic/tests/unit/common/test_images.py17
-rw-r--r--ironic/tests/unit/common/test_pxe_utils.py13
-rw-r--r--ironic/tests/unit/common/test_raid.py51
-rw-r--r--ironic/tests/unit/conductor/test_manager.py10
-rw-r--r--ironic/tests/unit/conductor/test_task_manager.py40
-rw-r--r--ironic/tests/unit/db/sqlalchemy/test_migrations.py51
-rw-r--r--ironic/tests/unit/db/test_allocations.py234
-rw-r--r--ironic/tests/unit/db/test_nodes.py24
-rw-r--r--ironic/tests/unit/db/utils.py30
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_raid.py342
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent.py52
-rw-r--r--ironic/tests/unit/drivers/modules/test_deploy_utils.py8
-rw-r--r--ironic/tests/unit/drivers/test_ilo.py45
-rw-r--r--ironic/tests/unit/drivers/third_party_driver_mocks.py2
-rw-r--r--ironic/tests/unit/objects/test_allocation.py144
-rw-r--r--ironic/tests/unit/objects/test_node.py61
-rw-r--r--ironic/tests/unit/objects/test_objects.py5
37 files changed, 2152 insertions, 100 deletions
diff --git a/ironic/cmd/dbsync.py b/ironic/cmd/dbsync.py
index 52012a2c3..f554cbdfc 100644
--- a/ironic/cmd/dbsync.py
+++ b/ironic/cmd/dbsync.py
@@ -81,6 +81,8 @@ ONLINE_MIGRATIONS = (
# These are the models added in supported releases. We skip the version check
# for them since the tables do not exist when it happens.
NEW_MODELS = [
+ # TODO(dtantsur): remove in Train
+ 'Allocation',
]
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index f38caf04c..cba02d5a0 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -791,3 +791,15 @@ class AgentConnectionFailed(IronicException):
class NodeProtected(HTTPForbidden):
_msg_fmt = _("Node %(node)s is protected and cannot be undeployed, "
"rebuilt or deleted")
+
+
+class AllocationNotFound(NotFound):
+ _msg_fmt = _("Allocation %(allocation)s could not be found.")
+
+
+class AllocationDuplicateName(Conflict):
+ _msg_fmt = _("An allocation with name %(name)s already exists.")
+
+
+class AllocationAlreadyExists(Conflict):
+ _msg_fmt = _("An allocation with UUID %(uuid)s already exists.")
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 842bd5452..65fd48267 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -243,17 +243,12 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
:raises: ImageCreationFailed, if image creation failed while copying files
or while running command to generate iso.
"""
- ISOLINUX_BIN = 'isolinux/isolinux.bin'
- ISOLINUX_CFG = 'isolinux/isolinux.cfg'
-
- isolinux_options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
grub_options = {'linux': '/vmlinuz', 'initrd': '/initrd'}
with utils.tempdir() as tmpdir:
files_info = {
kernel: 'vmlinuz',
ramdisk: 'initrd',
- CONF.isolinux_bin: ISOLINUX_BIN,
}
# Open the deploy iso used to initiate deploy and copy the
@@ -274,12 +269,6 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
finally:
_umount_without_raise(mountdir)
- cfg = _generate_cfg(kernel_params,
- CONF.isolinux_config_template, isolinux_options)
-
- isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
- utils.write_to_file(isolinux_cfg, cfg)
-
# Generate and copy grub config file.
grub_cfg = os.path.join(tmpdir, grub_rel_path)
grub_conf = _generate_cfg(kernel_params,
@@ -288,12 +277,10 @@ def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
# Create the boot_iso.
try:
- utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
- '-cache-inodes', '-J', '-l', '-no-emul-boot',
- '-boot-load-size', '4', '-boot-info-table',
- '-b', ISOLINUX_BIN, '-eltorito-alt-boot',
+ utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", '-l',
'-e', e_img_rel_path, '-no-emul-boot',
'-o', output_file, tmpdir)
+
except processutils.ProcessExecutionError as e:
LOG.exception("Creating ISO image failed.")
raise exception.ImageCreationFailed(image_type='iso', error=e)
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index e6d631d38..0a0ff400c 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -465,15 +465,35 @@ def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False):
# if the request comes from dumb firmware send them the iPXE
# boot image.
if dhcp_provider_name == 'neutron':
- # Neutron use dnsmasq as default DHCP agent, add extra config
- # to neutron "dhcp-match=set:ipxe,175" and use below option
+            # Neutron uses dnsmasq as the default DHCP agent. Neutron carries the
+ # configuration to relate to the tags below. The ipxe6 tag was
+ # added in the Stein cycle which identifies the iPXE User-Class
+ # directly and is only sent in DHCPv6.
+
+ # NOTE(TheJulia): Lets send both, let neutron tag/sort it out as
+ # an ip_version field is also transmitted. Plus, given the
+ # semi-obscure nature of this, being more verbose and letting
+ # the DHCP server do the best thing possible is likely the best
+ # course of action.
dhcp_opts.append({'opt_name': "tag:!ipxe,%s" % boot_file_param,
'opt_value': boot_file})
+ dhcp_opts.append({'opt_name': "tag:!ipxe6,%s" % boot_file_param,
+ 'opt_value': boot_file})
dhcp_opts.append({'opt_name': "tag:ipxe,%s" % boot_file_param,
'opt_value': ipxe_script_url})
+ dhcp_opts.append({'opt_name': "tag:ipxe6,%s" % boot_file_param,
+ 'opt_value': ipxe_script_url})
else:
# !175 == non-iPXE.
# http://ipxe.org/howto/dhcpd#ipxe-specific_options
+ if ip_version == 6:
+                LOG.warning('IPv6 is enabled and the DHCP driver appears set '
+                            'to a plugin aside from "neutron". Node %(name)s '
+                            'may not receive proper DHCPv6 provided '
+                            'boot parameters.', {'name': task.node.uuid})
+ # NOTE(TheJulia): This was added for ISC DHCPd support, however it
+            # appears that ISC support was never added to neutron and is
+            # likely a downstream driver.
dhcp_opts.append({'opt_name': "!%s,%s" % (DHCP_IPXE_ENCAP_OPTS,
boot_file_param),
'opt_value': boot_file})
diff --git a/ironic/common/raid.py b/ironic/common/raid.py
index 54a3419c0..3f503beb3 100644
--- a/ironic/common/raid.py
+++ b/ironic/common/raid.py
@@ -126,3 +126,51 @@ def update_raid_info(node, raid_config):
node.properties = properties
node.save()
+
+
+def filter_target_raid_config(
+ node, create_root_volume=True, create_nonroot_volumes=True):
+ """Filter the target raid config based on root volume creation
+
+ This method can be used by any raid interface which wants to filter
+ out target raid config based on condition whether the root volume
+ will be created or not.
+
+ :param node: a node object
+ :param create_root_volume: A boolean default value True governing
+ if the root volume is returned else root volumes will be filtered
+ out.
+ :param create_nonroot_volumes: A boolean default value True governing
+ if the non root volume is returned else non-root volumes will be
+ filtered out.
+ :raises: MissingParameterValue, if node.target_raid_config is missing
+ or was found to be empty after skipping root volume and/or non-root
+ volumes.
+ :returns: It will return filtered target_raid_config
+ """
+ if not node.target_raid_config:
+ raise exception.MissingParameterValue(
+ _("Node %s has no target RAID configuration.") % node.uuid)
+
+ target_raid_config = node.target_raid_config.copy()
+
+ error_msg_list = []
+ if not create_root_volume:
+ target_raid_config['logical_disks'] = [
+ x for x in target_raid_config['logical_disks']
+ if not x.get('is_root_volume')]
+ error_msg_list.append(_("skipping root volume"))
+
+ if not create_nonroot_volumes:
+ target_raid_config['logical_disks'] = [
+ x for x in target_raid_config['logical_disks']
+ if x.get('is_root_volume')]
+ error_msg_list.append(_("skipping non-root volumes"))
+
+ if not target_raid_config['logical_disks']:
+ error_msg = _(' and ').join(error_msg_list)
+ raise exception.MissingParameterValue(
+ _("Node %(node)s has empty target RAID configuration "
+ "after %(msg)s.") % {'node': node.uuid, 'msg': error_msg})
+
+ return target_raid_config
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index 883a4910b..0486993ac 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -134,7 +134,8 @@ RELEASE_MAPPING = {
'api': '1.50',
'rpc': '1.47',
'objects': {
- 'Node': ['1.30', '1.29', '1.28'],
+ 'Allocation': ['1.0'],
+ 'Node': ['1.31', '1.30', '1.29', '1.28'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Port': ['1.8'],
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 0c1430933..65e6dc2a9 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -219,7 +219,12 @@ class ConductorManager(base_manager.BaseConductorManager):
driver_factory.check_and_update_node_interfaces(node_obj)
+ # NOTE(dtantsur): if we're updating the driver from an invalid value,
+ # loading the old driver may be impossible. Since we only need to
+ # update the node record in the database, skip loading the driver
+ # completely.
with task_manager.acquire(context, node_id, shared=False,
+ load_driver=False,
purpose='node update') as task:
# Prevent instance_uuid overwriting
if ('instance_uuid' in delta and node_obj.instance_uuid
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index b32d33a71..804d8e4a2 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -149,20 +149,16 @@ def require_exclusive_lock(f):
return wrapper
-def acquire(context, node_id, shared=False, purpose='unspecified action'):
+def acquire(context, *args, **kwargs):
"""Shortcut for acquiring a lock on a Node.
:param context: Request context.
- :param node_id: ID or UUID of node to lock.
- :param shared: Boolean indicating whether to take a shared or exclusive
- lock. Default: False.
- :param purpose: human-readable purpose to put to debug logs.
:returns: An instance of :class:`TaskManager`.
"""
# NOTE(lintan): This is a workaround to set the context of periodic tasks.
context.ensure_thread_contain_context()
- return TaskManager(context, node_id, shared=shared, purpose=purpose)
+ return TaskManager(context, *args, **kwargs)
class TaskManager(object):
@@ -174,7 +170,8 @@ class TaskManager(object):
"""
def __init__(self, context, node_id, shared=False,
- purpose='unspecified action'):
+ purpose='unspecified action', retry=True,
+ load_driver=True):
"""Create a new TaskManager.
Acquire a lock on a node. The lock can be either shared or
@@ -187,6 +184,10 @@ class TaskManager(object):
:param shared: Boolean indicating whether to take a shared or exclusive
lock. Default: False.
:param purpose: human-readable purpose to put to debug logs.
+ :param retry: whether to retry locking if it fails. Default: True.
+ :param load_driver: whether to load the ``driver`` object. Set this to
+ False if loading the driver is undesired or
+ impossible.
:raises: DriverNotFound
:raises: InterfaceNotFoundInEntrypoint
:raises: NodeNotFound
@@ -201,6 +202,7 @@ class TaskManager(object):
self._node = None
self.node_id = node_id
self.shared = shared
+ self._retry = retry
self.fsm = states.machine.copy()
self._purpose = purpose
@@ -231,7 +233,10 @@ class TaskManager(object):
context, self.node.id)
self.volume_targets = objects.VolumeTarget.list_by_node_id(
context, self.node.id)
- self.driver = driver_factory.build_driver_for_task(self)
+ if load_driver:
+ self.driver = driver_factory.build_driver_for_task(self)
+ else:
+ self.driver = None
except Exception:
with excutils.save_and_reraise_exception():
@@ -251,12 +256,17 @@ class TaskManager(object):
def _lock(self):
self._debug_timer.restart()
+ if self._retry:
+ attempts = CONF.conductor.node_locked_retry_attempts
+ else:
+ attempts = 1
+
# NodeLocked exceptions can be annoying. Let's try to alleviate
# some of that pain by retrying our lock attempts. The retrying
# module expects a wait_fixed value in milliseconds.
@retrying.retry(
retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
- stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+ stop_max_attempt_number=attempts,
wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
def reserve_node():
self.node = objects.Node.reserve(self.context, CONF.host,
diff --git a/ironic/db/api.py b/ironic/db/api.py
index c98880c7f..e43dc55da 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -1079,3 +1079,82 @@ class Connection(object):
:returns: A list of BIOSSetting objects.
:raises: NodeNotFound if the node is not found.
"""
+
+ @abc.abstractmethod
+ def get_allocation_by_id(self, allocation_id):
+ """Return an allocation representation.
+
+ :param allocation_id: The id of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+
+ @abc.abstractmethod
+ def get_allocation_by_uuid(self, allocation_uuid):
+ """Return an allocation representation.
+
+ :param allocation_uuid: The uuid of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+
+ @abc.abstractmethod
+ def get_allocation_by_name(self, name):
+ """Return an allocation representation.
+
+ :param name: The logical name of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+
+ @abc.abstractmethod
+ def get_allocation_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of allocations.
+
+ :param filters: Filters to apply. Defaults to None.
+
+ :node_uuid: uuid of node
+ :state: allocation state
+ :resource_class: requested resource class
+ :param limit: Maximum number of allocations to return.
+ :param marker: The last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: Direction in which results should be sorted.
+ (asc, desc)
+ :returns: A list of allocations.
+ """
+
+ @abc.abstractmethod
+ def create_allocation(self, values):
+ """Create a new allocation.
+
+ :param values: Dict of values to create an allocation with
+ :returns: An allocation
+ :raises: AllocationDuplicateName
+ :raises: AllocationAlreadyExists
+ """
+
+ @abc.abstractmethod
+ def update_allocation(self, allocation_id, values, update_node=True):
+ """Update properties of an allocation.
+
+ :param allocation_id: Allocation ID
+ :param values: Dict of values to update.
+ :param update_node: If True and node_id is updated, update the node
+ with instance_uuid and traits from the allocation
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ :raises: AllocationDuplicateName
+ :raises: InstanceAssociated
+ :raises: NodeAssociated
+ """
+
+ @abc.abstractmethod
+ def destroy_allocation(self, allocation_id):
+ """Destroy an allocation.
+
+        :param allocation_id: Allocation ID or UUID
+ :raises: AllocationNotFound
+ """
diff --git a/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
new file mode 100644
index 000000000..55560dc68
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add Allocations table
+
+Revision ID: dd67b91a1981
+Revises: f190f9d00a11
+Create Date: 2018-12-10 15:24:30.555995
+
+"""
+
+from alembic import op
+from oslo_db.sqlalchemy import types
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = 'dd67b91a1981'
+down_revision = 'f190f9d00a11'
+
+
+def upgrade():
+ op.create_table(
+ 'allocations',
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('version', sa.String(length=15), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=False),
+ sa.Column('name', sa.String(length=255), nullable=True),
+ sa.Column('node_id', sa.Integer(), nullable=True),
+ sa.Column('state', sa.String(length=15), nullable=False),
+ sa.Column('last_error', sa.Text(), nullable=True),
+ sa.Column('resource_class', sa.String(length=80), nullable=True),
+ sa.Column('traits', types.JsonEncodedList(), nullable=True),
+ sa.Column('candidate_nodes', types.JsonEncodedList(), nullable=True),
+ sa.Column('extra', types.JsonEncodedDict(), nullable=True),
+ sa.Column('conductor_affinity', sa.Integer(), nullable=True),
+ sa.ForeignKeyConstraint(['conductor_affinity'], ['conductors.id'], ),
+ sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('name', name='uniq_allocations0name'),
+ sa.UniqueConstraint('uuid', name='uniq_allocations0uuid')
+ )
+ op.add_column('nodes', sa.Column('allocation_id', sa.Integer(),
+ nullable=True))
+ op.create_foreign_key(None, 'nodes', 'allocations',
+ ['allocation_id'], ['id'])
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 2c096f002..a6d110dc1 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -182,6 +182,15 @@ def add_node_filter_by_chassis(query, value):
return query.filter(models.Chassis.uuid == value)
+def add_allocation_filter_by_node(query, value):
+ if strutils.is_int_like(value):
+ return query.filter_by(node_id=value)
+ else:
+ query = query.join(models.Node,
+ models.Allocation.node_id == models.Node.id)
+ return query.filter(models.Node.uuid == value)
+
+
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
@@ -224,7 +233,8 @@ class Connection(api.Connection):
'chassis_uuid', 'associated', 'reserved',
'reserved_by_any_of', 'provisioned_before',
'inspection_started_before', 'fault',
- 'conductor_group', 'owner'}
+ 'conductor_group', 'owner',
+ 'uuid_in', 'with_power_state'}
unsupported_filters = set(filters).difference(supported_filters)
if unsupported_filters:
msg = _("SqlAlchemy API does not support "
@@ -263,9 +273,37 @@ class Connection(api.Connection):
- (datetime.timedelta(
seconds=filters['inspection_started_before'])))
query = query.filter(models.Node.inspection_started_at < limit)
+ if 'uuid_in' in filters:
+ query = query.filter(models.Node.uuid.in_(filters['uuid_in']))
+ if 'with_power_state' in filters:
+ if filters['with_power_state']:
+ query = query.filter(models.Node.power_state != sql.null())
+ else:
+ query = query.filter(models.Node.power_state == sql.null())
return query
+ def _add_allocations_filters(self, query, filters):
+ if filters is None:
+ filters = dict()
+ supported_filters = {'state', 'resource_class', 'node_uuid'}
+ unsupported_filters = set(filters).difference(supported_filters)
+ if unsupported_filters:
+ msg = _("SqlAlchemy API does not support "
+ "filtering by %s") % ', '.join(unsupported_filters)
+ raise ValueError(msg)
+
+ try:
+ node_uuid = filters.pop('node_uuid')
+ except KeyError:
+ pass
+ else:
+ query = add_allocation_filter_by_node(query, node_uuid)
+
+ if filters:
+ query = query.filter_by(**filters)
+ return query
+
def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
# list-ify columns default values because it is bad form
@@ -452,6 +490,11 @@ class Connection(api.Connection):
models.BIOSSetting).filter_by(node_id=node_id)
bios_settings_query.delete()
+ # delete all allocations for this node
+ allocation_query = model_query(
+ models.Allocation).filter_by(node_id=node_id)
+ allocation_query.delete()
+
query.delete()
def update_node(self, node_id, values):
@@ -1482,3 +1525,173 @@ class Connection(api.Connection):
.filter_by(node_id=node_id)
.all())
return result
+
+ def get_allocation_by_id(self, allocation_id):
+ """Return an allocation representation.
+
+ :param allocation_id: The id of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+ query = model_query(models.Allocation).filter_by(id=allocation_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_id)
+
+ def get_allocation_by_uuid(self, allocation_uuid):
+ """Return an allocation representation.
+
+ :param allocation_uuid: The uuid of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+ query = model_query(models.Allocation).filter_by(uuid=allocation_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_uuid)
+
+ def get_allocation_by_name(self, name):
+ """Return an allocation representation.
+
+ :param name: The logical name of an allocation.
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ """
+ query = model_query(models.Allocation).filter_by(name=name)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=name)
+
+ def get_allocation_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of allocations.
+
+ :param filters: Filters to apply. Defaults to None.
+
+ :node_uuid: uuid of node
+ :state: allocation state
+ :resource_class: requested resource class
+ :param limit: Maximum number of allocations to return.
+ :param marker: The last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: Direction in which results should be sorted.
+ (asc, desc)
+ :returns: A list of allocations.
+ """
+ query = self._add_allocations_filters(model_query(models.Allocation),
+ filters)
+ return _paginate_query(models.Allocation, limit, marker,
+ sort_key, sort_dir, query)
+
+ @oslo_db_api.retry_on_deadlock
+ def create_allocation(self, values):
+ """Create a new allocation.
+
+ :param values: Dict of values to create an allocation with
+ :returns: An allocation
+ :raises: AllocationDuplicateName
+ :raises: AllocationAlreadyExists
+ """
+ if not values.get('uuid'):
+ values['uuid'] = uuidutils.generate_uuid()
+
+ allocation = models.Allocation()
+ allocation.update(values)
+ with _session_for_write() as session:
+ try:
+ session.add(allocation)
+ session.flush()
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.AllocationDuplicateName(
+ name=values['name'])
+ else:
+ raise exception.AllocationAlreadyExists(
+ uuid=values['uuid'])
+ return allocation
+
+ @oslo_db_api.retry_on_deadlock
+ def update_allocation(self, allocation_id, values, update_node=True):
+ """Update properties of an allocation.
+
+ :param allocation_id: Allocation ID
+ :param values: Dict of values to update.
+ :param update_node: If True and node_id is updated, update the node
+ with instance_uuid and traits from the allocation
+ :returns: An allocation.
+ :raises: AllocationNotFound
+ :raises: AllocationDuplicateName
+ :raises: InstanceAssociated
+ :raises: NodeAssociated
+ """
+ if 'uuid' in values:
+ msg = _("Cannot overwrite UUID for an existing allocation.")
+ raise exception.InvalidParameterValue(err=msg)
+
+ # These values are used in exception handling. They should always be
+ # initialized, but set them to None just in case.
+ instance_uuid = node_uuid = None
+
+ with _session_for_write() as session:
+ try:
+ query = model_query(models.Allocation, session=session)
+ query = add_identity_filter(query, allocation_id)
+ ref = query.one()
+ ref.update(values)
+ instance_uuid = ref.uuid
+
+ if 'node_id' in values and update_node:
+ node = model_query(models.Node, session=session).filter_by(
+ id=ref.node_id).with_lockmode('update').one()
+ node_uuid = node.uuid
+ if node.instance_uuid and node.instance_uuid != ref.uuid:
+ raise exception.NodeAssociated(
+ node=node.uuid, instance=node.instance_uuid)
+ iinfo = node.instance_info.copy()
+ iinfo['traits'] = ref.traits or []
+ node.update({'allocation_id': ref.id,
+ 'instance_uuid': instance_uuid,
+ 'instance_info': iinfo})
+ session.flush()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_id)
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.AllocationDuplicateName(
+ name=values['name'])
+ elif 'instance_uuid' in exc.columns:
+ # Case when the allocation UUID is already used on some
+ # node as instance_uuid.
+ raise exception.InstanceAssociated(
+ instance_uuid=instance_uuid, node=node_uuid)
+ else:
+ raise
+ return ref
+
+ @oslo_db_api.retry_on_deadlock
+ def destroy_allocation(self, allocation_id):
+ """Destroy an allocation.
+
+ :param allocation_id: Allocation ID or UUID
+ :raises: AllocationNotFound
+ """
+ with _session_for_write() as session:
+ query = model_query(models.Allocation)
+ query = add_identity_filter(query, allocation_id)
+
+ try:
+ ref = query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_id)
+
+ allocation_id = ref['id']
+
+ node_query = model_query(models.Node, session=session).filter_by(
+ allocation_id=allocation_id)
+ node_query.update({'allocation_id': None, 'instance_uuid': None})
+
+ query.delete()
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index 2a17dfa65..db76a9dbd 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -180,6 +180,9 @@ class Node(Base):
server_default=false())
protected_reason = Column(Text, nullable=True)
owner = Column(String(255), nullable=True)
+ allocation_id = Column(Integer, ForeignKey('allocations.id'),
+ nullable=True)
+
bios_interface = Column(String(255), nullable=True)
boot_interface = Column(String(255), nullable=True)
console_interface = Column(String(255), nullable=True)
@@ -322,6 +325,29 @@ class BIOSSetting(Base):
value = Column(Text, nullable=True)
+class Allocation(Base):
+ """Represents an allocation of a node for deployment."""
+
+ __tablename__ = 'allocations'
+ __table_args__ = (
+ schema.UniqueConstraint('name', name='uniq_allocations0name'),
+ schema.UniqueConstraint('uuid', name='uniq_allocations0uuid'),
+ table_args())
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36), nullable=False)
+ name = Column(String(255), nullable=True)
+ node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
+ state = Column(String(15), nullable=False)
+ last_error = Column(Text, nullable=True)
+ resource_class = Column(String(80), nullable=True)
+ traits = Column(db_types.JsonEncodedList)
+ candidate_nodes = Column(db_types.JsonEncodedList)
+ extra = Column(db_types.JsonEncodedDict)
+ # The last conductor to handle this allocation (internal field).
+ conductor_affinity = Column(Integer, ForeignKey('conductors.id'),
+ nullable=True)
+
+
def get_class(model_name):
"""Returns the model class with the specified name.
diff --git a/ironic/drivers/ilo.py b/ironic/drivers/ilo.py
index 4cf4a6317..3540c6944 100644
--- a/ironic/drivers/ilo.py
+++ b/ironic/drivers/ilo.py
@@ -22,6 +22,7 @@ from ironic.drivers.modules.ilo import console
from ironic.drivers.modules.ilo import inspect
from ironic.drivers.modules.ilo import management
from ironic.drivers.modules.ilo import power
+from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules.ilo import vendor
from ironic.drivers.modules import inspector
from ironic.drivers.modules import noop
@@ -69,3 +70,15 @@ class IloHardware(generic.GenericHardware):
def supported_vendor_interfaces(self):
"""List of supported power interfaces."""
return [vendor.VendorPassthru, noop.NoVendor]
+
+
+class Ilo5Hardware(IloHardware):
+ """iLO5 hardware type.
+
+ iLO5 hardware type is targeted for iLO5 based Proliant Gen10 servers.
+ """
+
+ @property
+ def supported_raid_interfaces(self):
+ """List of supported raid interfaces."""
+ return [raid.Ilo5RAID, noop.NoRAID]
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index 020ebbc1f..c9344ab15 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -698,32 +698,10 @@ class AgentRAID(base.RAIDInterface):
'create_nonroot_volumes': create_nonroot_volumes,
'target_raid_config': node.target_raid_config})
- if not node.target_raid_config:
- raise exception.MissingParameterValue(
- _("Node %s has no target RAID configuration.") % node.uuid)
-
- target_raid_config = node.target_raid_config.copy()
-
- error_msg_list = []
- if not create_root_volume:
- target_raid_config['logical_disks'] = [
- x for x in target_raid_config['logical_disks']
- if not x.get('is_root_volume')]
- error_msg_list.append(_("skipping root volume"))
-
- if not create_nonroot_volumes:
- error_msg_list.append(_("skipping non-root volumes"))
-
- target_raid_config['logical_disks'] = [
- x for x in target_raid_config['logical_disks']
- if x.get('is_root_volume')]
-
- if not target_raid_config['logical_disks']:
- error_msg = _(' and ').join(error_msg_list)
- raise exception.MissingParameterValue(
- _("Node %(node)s has empty target RAID configuration "
- "after %(msg)s.") % {'node': node.uuid, 'msg': error_msg})
-
+ target_raid_config = raid.filter_target_raid_config(
+ node,
+ create_root_volume=create_root_volume,
+ create_nonroot_volumes=create_nonroot_volumes)
# Rewrite it back to the node object, but no need to save it as
# we need to just send this to the agent ramdisk.
node.driver_internal_info['target_raid_config'] = target_raid_config
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index a4deae971..ed73ed20b 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -735,19 +735,22 @@ def get_pxe_config_template(node):
"""Return the PXE config template file name requested for deploy.
This method returns PXE config template file to be used for deploy.
- Architecture specific template file is searched first. BIOS/UEFI
- template file is used if no valid architecture specific file found.
+ First specific pxe template is searched in the node. After that
+ architecture specific template file is searched. BIOS/UEFI template file
+ is used if no valid architecture specific file found.
:param node: A single Node.
:returns: The PXE config template file name.
"""
- cpu_arch = node.properties.get('cpu_arch')
- config_template = CONF.pxe.pxe_config_template_by_arch.get(cpu_arch)
+ config_template = node.driver_info.get("pxe_template", None)
if config_template is None:
- if boot_mode_utils.get_boot_mode(node) == 'uefi':
- config_template = CONF.pxe.uefi_pxe_config_template
- else:
- config_template = CONF.pxe.pxe_config_template
+ cpu_arch = node.properties.get('cpu_arch')
+ config_template = CONF.pxe.pxe_config_template_by_arch.get(cpu_arch)
+ if config_template is None:
+ if boot_mode_utils.get_boot_mode(node) == 'uefi':
+ config_template = CONF.pxe.uefi_pxe_config_template
+ else:
+ config_template = CONF.pxe.pxe_config_template
return config_template
diff --git a/ironic/drivers/modules/ilo/raid.py b/ironic/drivers/modules/ilo/raid.py
new file mode 100644
index 000000000..07d695133
--- /dev/null
+++ b/ironic/drivers/modules/ilo/raid.py
@@ -0,0 +1,235 @@
+# Copyright 2018 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+iLO5 RAID specific methods
+"""
+
+from ironic_lib import metrics_utils
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import raid
+from ironic.common import states
+from ironic.conductor import utils as manager_utils
+from ironic import conf
+from ironic.drivers import base
+from ironic.drivers.modules import deploy_utils
+from ironic.drivers.modules.ilo import common as ilo_common
+
+
+LOG = logging.getLogger(__name__)
+CONF = conf.CONF
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+ilo_error = importutils.try_import('proliantutils.exception')
+
+
+class Ilo5RAID(base.RAIDInterface):
+    """Implementation of OOB RAIDInterface for iLO5.
+
+    RAID create/delete run out-of-band through proliantutils. Each clean
+    step is executed in two passes: the first pass submits the operation
+    to the iLO and reboots into the agent ramdisk (returning CLEANWAIT),
+    the second pass reads the resulting configuration back and finishes
+    (or fails) the step.
+    """
+
+    def get_properties(self):
+        """Return the properties of the interface.
+
+        :returns: dictionary of <property name>:<property description>
+            entries.
+        """
+        return ilo_common.REQUIRED_PROPERTIES
+
+    def _set_clean_failed(self, task, msg, exc):
+        # Record the failure on the node and transition it to the
+        # 'clean failed' state via the state machine.
+        # NOTE(review): the 'exc' argument is currently unused -- consider
+        # including it in the log message or in last_error for debugging.
+        LOG.error("RAID configuration job failed for node %(node)s. "
+                  "Message: '%(message)s'.",
+                  {'node': task.node.uuid, 'message': msg})
+        task.node.last_error = msg
+        task.process_event('fail')
+
+    def _set_driver_internal_true_value(self, task, *keys):
+        # Set each given driver_internal_info key to True and persist.
+        driver_internal_info = task.node.driver_internal_info
+        for key in keys:
+            driver_internal_info[key] = True
+        task.node.driver_internal_info = driver_internal_info
+        task.node.save()
+
+    def _set_driver_internal_false_value(self, task, *keys):
+        # Set each given driver_internal_info key to False and persist.
+        driver_internal_info = task.node.driver_internal_info
+        for key in keys:
+            driver_internal_info[key] = False
+        task.node.driver_internal_info = driver_internal_info
+        task.node.save()
+
+    def _pop_driver_internal_values(self, task, *keys):
+        # Remove the given keys from driver_internal_info (ignoring
+        # missing ones) and persist the node.
+        driver_internal_info = task.node.driver_internal_info
+        for key in keys:
+            driver_internal_info.pop(key, None)
+        task.node.driver_internal_info = driver_internal_info
+        task.node.save()
+
+    def _prepare_for_read_raid(self, task, raid_step):
+        # Reboot the node into the agent ramdisk so that the result of the
+        # submitted RAID operation can be read back on the second pass of
+        # the clean step.
+        deploy_opts = deploy_utils.build_agent_options(task.node)
+        task.driver.boot.prepare_ramdisk(task, deploy_opts)
+        manager_utils.node_power_action(task, states.REBOOT)
+        if raid_step == 'create_raid':
+            self._set_driver_internal_true_value(
+                task, 'ilo_raid_create_in_progress')
+        else:
+            self._set_driver_internal_true_value(
+                task, 'ilo_raid_delete_in_progress')
+        # Flag the reboot so the cleaning loop re-enters this same step
+        # instead of skipping to the next one.
+        self._set_driver_internal_true_value(task, 'cleaning_reboot')
+        self._set_driver_internal_false_value(task, 'skip_current_clean_step')
+
+    @METRICS.timer('Ilo5RAID.create_configuration')
+    @base.clean_step(priority=0, abortable=False, argsinfo={
+        'create_root_volume': {
+            'description': (
+                'This specifies whether to create the root volume. '
+                'Defaults to `True`.'
+            ),
+            'required': False
+        },
+        'create_nonroot_volumes': {
+            'description': (
+                'This specifies whether to create the non-root volumes. '
+                'Defaults to `True`.'
+            ),
+            'required': False
+        }
+    })
+    def create_configuration(self, task, create_root_volume=True,
+                             create_nonroot_volumes=True):
+        """Create a RAID configuration on a bare metal using agent ramdisk.
+
+        This method creates a RAID configuration on the given node.
+
+        :param task: a TaskManager instance.
+        :param create_root_volume: If True, a root volume is created
+            during RAID configuration. Otherwise, no root volume is
+            created. Default is True.
+        :param create_nonroot_volumes: If True, non-root volumes are
+            created. If False, no non-root volumes are created. Default
+            is True.
+        :returns: states.CLEANWAIT on the first pass, after the RAID job
+            has been submitted and a reboot initiated; None when the step
+            completes on the second pass.
+        :raises: MissingParameterValue, if node.target_raid_config is missing
+            or was found to be empty after skipping root volume and/or non-root
+            volumes.
+        :raises: NodeCleaningFailure, on failure to execute step.
+        """
+        node = task.node
+        target_raid_config = raid.filter_target_raid_config(
+            node, create_root_volume=create_root_volume,
+            create_nonroot_volumes=create_nonroot_volumes)
+        driver_internal_info = node.driver_internal_info
+        driver_internal_info['target_raid_config'] = target_raid_config
+        LOG.debug("Calling OOB RAID create_configuration for node %(node)s "
+                  "with the following target RAID configuration: %(target)s",
+                  {'node': node.uuid, 'target': target_raid_config})
+        ilo_object = ilo_common.get_ilo_object(node)
+
+        try:
+            # Raid configuration in progress, checking status
+            if not driver_internal_info.get('ilo_raid_create_in_progress'):
+                # First pass: submit the job and reboot into the ramdisk.
+                ilo_object.create_raid_configuration(target_raid_config)
+                self._prepare_for_read_raid(task, 'create_raid')
+                return states.CLEANWAIT
+            else:
+                # Raid configuration is done, updating raid_config
+                raid_conf = (
+                    ilo_object.read_raid_configuration(
+                        raid_config=target_raid_config))
+                if len(raid_conf['logical_disks']):
+                    raid.update_raid_info(node, raid_conf)
+                    LOG.debug("Node %(uuid)s raid create clean step is done.",
+                              {'uuid': node.uuid})
+                    self._pop_driver_internal_values(
+                        task, 'ilo_raid_create_in_progress',
+                        'cleaning_reboot', 'skip_current_clean_step')
+                    node.driver_internal_info = driver_internal_info
+                    node.save()
+                else:
+                    # Raid configuration failed
+                    msg = "Unable to create raid"
+                    self._pop_driver_internal_values(
+                        task, 'ilo_raid_create_in_progress',
+                        'cleaning_reboot', 'skip_current_clean_step')
+                    node.driver_internal_info = driver_internal_info
+                    node.save()
+                    raise exception.NodeCleaningFailure(
+                        "Clean step create_configuration failed "
+                        "on node %(node)s with error: %(err)s" %
+                        {'node': node.uuid, 'err': msg})
+        except ilo_error.IloError as ilo_exception:
+            operation = (_("Failed to create raid configuration on node %s")
+                         % node.uuid)
+            self._pop_driver_internal_values(task,
+                                             'ilo_raid_create_in_progress',
+                                             'cleaning_reboot',
+                                             'skip_current_clean_step')
+            node.driver_internal_info = driver_internal_info
+            node.save()
+            self._set_clean_failed(task, operation, ilo_exception)
+
+    @METRICS.timer('Ilo5RAID.delete_configuration')
+    @base.clean_step(priority=0, abortable=False)
+    def delete_configuration(self, task):
+        """Delete the RAID configuration.
+
+        :param task: a TaskManager instance containing the node to act on.
+        :returns: states.CLEANWAIT on the first pass, after the delete job
+            has been submitted and a reboot initiated; None when the step
+            completes on the second pass.
+        :raises: NodeCleaningFailure, on failure to execute step.
+        """
+        node = task.node
+        LOG.debug("OOB RAID delete_configuration invoked for node %s.",
+                  node.uuid)
+        driver_internal_info = node.driver_internal_info
+        ilo_object = ilo_common.get_ilo_object(node)
+
+        try:
+            # Raid configuration in progress, checking status
+            if not driver_internal_info.get('ilo_raid_delete_in_progress'):
+                # First pass: submit the delete and reboot into the ramdisk.
+                ilo_object.delete_raid_configuration()
+                self._prepare_for_read_raid(task, 'delete_raid')
+                return states.CLEANWAIT
+            else:
+                # Raid configuration is done, updating raid_config
+                raid_conf = ilo_object.read_raid_configuration()
+                # An empty 'logical_disks' list means everything was deleted.
+                if not len(raid_conf['logical_disks']):
+                    node.raid_config = {}
+                    LOG.debug("Node %(uuid)s raid delete clean step is done.",
+                              {'uuid': node.uuid})
+                    self._pop_driver_internal_values(
+                        task, 'ilo_raid_delete_in_progress',
+                        'cleaning_reboot', 'skip_current_clean_step')
+                    node.driver_internal_info = driver_internal_info
+                    node.save()
+                else:
+                    # Raid configuration failed
+                    msg = ("Unable to delete this logical disks: %s" %
+                           raid_conf['logical_disks'])
+                    self._pop_driver_internal_values(
+                        task, 'ilo_raid_delete_in_progress',
+                        'cleaning_reboot', 'skip_current_clean_step')
+                    node.driver_internal_info = driver_internal_info
+                    node.save()
+                    raise exception.NodeCleaningFailure(
+                        "Clean step delete_configuration failed "
+                        "on node %(node)s with error: %(err)s" %
+                        {'node': node.uuid, 'err': msg})
+        except ilo_error.IloLogicalDriveNotFoundError:
+            # Nothing to delete is a success for this step, not an error.
+            LOG.info("No logical drive found to delete on node %(node)s",
+                     {'node': node.uuid})
+        except ilo_error.IloError as ilo_exception:
+            operation = (_("Failed to delete raid configuration on node %s")
+                         % node.uuid)
+            self._pop_driver_internal_values(task,
+                                             'ilo_raid_delete_in_progress',
+                                             'cleaning_reboot',
+                                             'skip_current_clean_step')
+            node.driver_internal_info = driver_internal_info
+            node.save()
+            self._set_clean_failed(task, operation, ilo_exception)
diff --git a/ironic/objects/__init__.py b/ironic/objects/__init__.py
index 63c4d2b13..96ddc1c28 100644
--- a/ironic/objects/__init__.py
+++ b/ironic/objects/__init__.py
@@ -24,6 +24,7 @@ def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
+ __import__('ironic.objects.allocation')
__import__('ironic.objects.bios')
__import__('ironic.objects.chassis')
__import__('ironic.objects.conductor')
diff --git a/ironic/objects/allocation.py b/ironic/objects/allocation.py
new file mode 100644
index 000000000..655b9ad1e
--- /dev/null
+++ b/ironic/objects/allocation.py
@@ -0,0 +1,300 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+from oslo_versionedobjects import base as object_base
+
+from ironic.common import exception
+from ironic.common import utils
+from ironic.db import api as dbapi
+from ironic.objects import base
+from ironic.objects import fields as object_fields
+from ironic.objects import notification
+
+
+@base.IronicObjectRegistry.register
+class Allocation(base.IronicObject, object_base.VersionedObjectDictCompat):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    dbapi = dbapi.get_instance()
+
+    fields = {
+        'id': object_fields.IntegerField(),
+        'uuid': object_fields.UUIDField(nullable=True),
+        'name': object_fields.StringField(nullable=True),
+        # ID of the node this allocation is bound to, if any.
+        'node_id': object_fields.IntegerField(nullable=True),
+        'state': object_fields.StringField(nullable=True),
+        'last_error': object_fields.StringField(nullable=True),
+        'resource_class': object_fields.StringField(nullable=True),
+        'traits': object_fields.ListOfStringsField(nullable=True),
+        'candidate_nodes': object_fields.ListOfStringsField(nullable=True),
+        'extra': object_fields.FlexibleDictField(nullable=True),
+        # Conductor that is handling this allocation, if any.
+        'conductor_affinity': object_fields.IntegerField(nullable=True),
+    }
+
+    def _convert_to_version(self, target_version,
+                            remove_unavailable_fields=True):
+        """Convert to the target version.
+
+        Convert the object to the target version. The target version may be
+        the same, older, or newer than the version of the object. This is
+        used for DB interactions as well as for serialization/deserialization.
+
+        NOTE(review): intentionally a no-op while only version 1.0 exists;
+        conversions must be added here when the version is bumped.
+
+        :param target_version: the desired version of the object
+        :param remove_unavailable_fields: True to remove fields that are
+            unavailable in the target version; set this to True when
+            (de)serializing. False to set the unavailable fields to appropriate
+            values; set this to False for DB interactions.
+        """
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable_classmethod
+    @classmethod
+    def get(cls, context, allocation_ident):
+        """Find an allocation by its ID, UUID or name.
+
+        :param allocation_ident: The ID, UUID or name of an allocation.
+        :param context: Security context
+        :returns: An :class:`Allocation` object.
+        :raises: InvalidIdentity
+
+        """
+        # Dispatch on the shape of the identifier: integer ID first, then
+        # UUID, then logical name.
+        if strutils.is_int_like(allocation_ident):
+            return cls.get_by_id(context, allocation_ident)
+        elif uuidutils.is_uuid_like(allocation_ident):
+            return cls.get_by_uuid(context, allocation_ident)
+        elif utils.is_valid_logical_name(allocation_ident):
+            return cls.get_by_name(context, allocation_ident)
+        else:
+            raise exception.InvalidIdentity(identity=allocation_ident)
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable_classmethod
+    @classmethod
+    def get_by_id(cls, context, allocation_id):
+        """Find an allocation by its integer ID.
+
+        :param cls: the :class:`Allocation`
+        :param context: Security context
+        :param allocation_id: The ID of an allocation.
+        :returns: An :class:`Allocation` object.
+        :raises: AllocationNotFound
+
+        """
+        db_allocation = cls.dbapi.get_allocation_by_id(allocation_id)
+        allocation = cls._from_db_object(context, cls(), db_allocation)
+        return allocation
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable_classmethod
+    @classmethod
+    def get_by_uuid(cls, context, uuid):
+        """Find an allocation by its UUID.
+
+        :param cls: the :class:`Allocation`
+        :param context: Security context
+        :param uuid: The UUID of an allocation.
+        :returns: An :class:`Allocation` object.
+        :raises: AllocationNotFound
+
+        """
+        db_allocation = cls.dbapi.get_allocation_by_uuid(uuid)
+        allocation = cls._from_db_object(context, cls(), db_allocation)
+        return allocation
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable_classmethod
+    @classmethod
+    def get_by_name(cls, context, name):
+        """Find an allocation by its name.
+
+        :param cls: the :class:`Allocation`
+        :param context: Security context
+        :param name: The name of an allocation.
+        :returns: An :class:`Allocation` object.
+        :raises: AllocationNotFound
+
+        """
+        db_allocation = cls.dbapi.get_allocation_by_name(name)
+        allocation = cls._from_db_object(context, cls(), db_allocation)
+        return allocation
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable_classmethod
+    @classmethod
+    def list(cls, context, filters=None, limit=None, marker=None,
+             sort_key=None, sort_dir=None):
+        """Return a list of Allocation objects.
+
+        :param cls: the :class:`Allocation`
+        :param context: Security context.
+        :param filters: Filters to apply.
+        :param limit: Maximum number of resources to return in a single result.
+        :param marker: Pagination marker for large data sets.
+        :param sort_key: Column to sort results by.
+        :param sort_dir: Direction to sort. "asc" or "desc".
+        :returns: A list of :class:`Allocation` object.
+        :raises: InvalidParameterValue
+
+        """
+        db_allocations = cls.dbapi.get_allocation_list(filters=filters,
+                                                       limit=limit,
+                                                       marker=marker,
+                                                       sort_key=sort_key,
+                                                       sort_dir=sort_dir)
+        return cls._from_db_object_list(context, db_allocations)
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable
+    def create(self, context=None):
+        """Create an Allocation record in the DB.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Allocation(context)
+        :raises: AllocationDuplicateName, AllocationAlreadyExists
+
+        """
+        values = self.do_version_changes_for_db()
+        db_allocation = self.dbapi.create_allocation(values)
+        self._from_db_object(self._context, self, db_allocation)
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable
+    def destroy(self, context=None):
+        """Delete the Allocation from the DB.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Allocation(context)
+        :raises: AllocationNotFound
+
+        """
+        self.dbapi.destroy_allocation(self.uuid)
+        self.obj_reset_changes()
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable
+    def save(self, context=None):
+        """Save updates to this Allocation.
+
+        Updates will be made column by column based on the result
+        of self.what_changed().
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Allocation(context)
+        :raises: AllocationNotFound, AllocationDuplicateName
+
+        """
+        updates = self.do_version_changes_for_db()
+        updated_allocation = self.dbapi.update_allocation(self.uuid, updates)
+        self._from_db_object(self._context, self, updated_allocation)
+
+    # NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
+    # methods can be used in the future to replace current explicit RPC calls.
+    # Implications of calling new remote procedures should be thought through.
+    # @object_base.remotable
+    def refresh(self, context=None):
+        """Loads updates for this Allocation.
+
+        Loads an allocation with the same uuid from the database and
+        checks for updated attributes. Updates are applied from
+        the loaded allocation column by column, if there are any updates.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Allocation(context)
+        :raises: AllocationNotFound
+
+        """
+        current = self.get_by_uuid(self._context, uuid=self.uuid)
+        self.obj_refresh(current)
+        self.obj_reset_changes()
+
+
+@base.IronicObjectRegistry.register
+class AllocationCRUDNotification(notification.NotificationBase):
+    """Notification when ironic creates, updates or deletes an allocation."""
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        # The actual allocation data carried by the notification.
+        'payload': object_fields.ObjectField('AllocationCRUDPayload')
+    }
+
+
+@base.IronicObjectRegistry.register
+class AllocationCRUDPayload(notification.NotificationPayloadBase):
+    """Payload carried by AllocationCRUDNotification.
+
+    Fields are populated from an Allocation object via SCHEMA, plus the
+    UUID of the associated node (passed in separately by the caller).
+    """
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    # Maps payload field name -> (source object name, source field name).
+    SCHEMA = {
+        'candidate_nodes': ('allocation', 'candidate_nodes'),
+        'created_at': ('allocation', 'created_at'),
+        'extra': ('allocation', 'extra'),
+        'last_error': ('allocation', 'last_error'),
+        'name': ('allocation', 'name'),
+        'resource_class': ('allocation', 'resource_class'),
+        'state': ('allocation', 'state'),
+        'traits': ('allocation', 'traits'),
+        'updated_at': ('allocation', 'updated_at'),
+        'uuid': ('allocation', 'uuid')
+    }
+
+    fields = {
+        'uuid': object_fields.UUIDField(nullable=True),
+        'name': object_fields.StringField(nullable=True),
+        # Not in SCHEMA: supplied explicitly via __init__.
+        'node_uuid': object_fields.StringField(nullable=True),
+        'state': object_fields.StringField(nullable=True),
+        'last_error': object_fields.StringField(nullable=True),
+        'resource_class': object_fields.StringField(nullable=True),
+        'traits': object_fields.ListOfStringsField(nullable=True),
+        'candidate_nodes': object_fields.ListOfStringsField(nullable=True),
+        'extra': object_fields.FlexibleDictField(nullable=True),
+        'created_at': object_fields.DateTimeField(nullable=True),
+        'updated_at': object_fields.DateTimeField(nullable=True),
+    }
+
+    def __init__(self, allocation, node_uuid):
+        """Build the payload from an allocation and its node's UUID.
+
+        :param allocation: the Allocation object the event is about.
+        :param node_uuid: UUID of the node tied to the allocation, or None.
+        """
+        super(AllocationCRUDPayload, self).__init__(node_uuid=node_uuid)
+        self.populate_schema(allocation=allocation)
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index 73b918745..3fe530525 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -67,7 +67,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.28: Add automated_clean field
# Version 1.29: Add protected and protected_reason fields
# Version 1.30: Add owner field
- VERSION = '1.30'
+ # Version 1.31: Add allocation_id field
+ VERSION = '1.31'
dbapi = db_api.get_instance()
@@ -136,6 +137,7 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
'automated_clean': objects.fields.BooleanField(nullable=True),
'protected': objects.fields.BooleanField(),
'protected_reason': object_fields.StringField(nullable=True),
+ 'allocation_id': object_fields.IntegerField(nullable=True),
'bios_interface': object_fields.StringField(nullable=True),
'boot_interface': object_fields.StringField(nullable=True),
@@ -585,6 +587,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
should be set to False (or removed).
Version 1.30: owner was added. For versions prior to this, it should be
set to None or removed.
+ Version 1.31: allocation_id was added. For versions prior to this, it
+ should be set to None (or removed).
:param target_version: the desired version of the object
:param remove_unavailable_fields: True to remove fields that are
@@ -597,7 +601,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
# Convert the different fields depending on version
fields = [('rescue_interface', 22), ('traits', 23),
('bios_interface', 24), ('automated_clean', 28),
- ('protected_reason', 29), ('owner', 30)]
+ ('protected_reason', 29), ('owner', 30),
+ ('allocation_id', 31)]
for name, minor in fields:
self._adjust_field_to_version(name, None, target_version,
1, minor, remove_unavailable_fields)
diff --git a/ironic/tests/unit/api/utils.py b/ironic/tests/unit/api/utils.py
index 0635d0294..f2c11bda1 100644
--- a/ironic/tests/unit/api/utils.py
+++ b/ironic/tests/unit/api/utils.py
@@ -100,6 +100,7 @@ def node_post_data(**kw):
node.pop('chassis_id')
node.pop('tags')
node.pop('traits')
+ node.pop('allocation_id')
# NOTE(jroll): pop out fields that were introduced in later API versions,
# unless explicitly requested. Otherwise, these will cause tests using
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index 11d7981a3..81c74eb7a 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -496,19 +496,14 @@ class FsImageTestCase(base.TestCase):
files_info = {
'path/to/kernel': 'vmlinuz',
'path/to/ramdisk': 'initrd',
- CONF.isolinux_bin: 'isolinux/isolinux.bin',
'path/to/grub': 'relpath/to/grub.cfg',
'sourceabspath/to/efiboot.img': 'path/to/efiboot.img'
}
- cfg = "cfg"
- cfg_file = 'tmpdir/isolinux/isolinux.cfg'
grubcfg = "grubcfg"
grub_file = 'tmpdir/relpath/to/grub.cfg'
- gen_cfg_mock.side_effect = cfg, grubcfg
+ gen_cfg_mock.side_effect = (grubcfg,)
params = ['a=b', 'c']
- isolinux_options = {'kernel': '/vmlinuz',
- 'ramdisk': '/initrd'}
grub_options = {'linux': '/vmlinuz',
'initrd': '/initrd'}
@@ -531,18 +526,12 @@ class FsImageTestCase(base.TestCase):
kernel_params=params)
mount_mock.assert_called_once_with('path/to/deploy_iso', 'mountdir')
create_root_fs_mock.assert_called_once_with('tmpdir', files_info)
- gen_cfg_mock.assert_any_call(params, CONF.isolinux_config_template,
- isolinux_options)
- write_to_file_mock.assert_any_call(cfg_file, cfg)
gen_cfg_mock.assert_any_call(params, CONF.grub_config_template,
grub_options)
write_to_file_mock.assert_any_call(grub_file, grubcfg)
execute_mock.assert_called_once_with(
- 'mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO", '-cache-inodes', '-J',
- '-l', '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table',
- '-b', 'isolinux/isolinux.bin', '-eltorito-alt-boot',
- '-e', 'path/to/efiboot.img', '-no-emul-boot',
- '-o', 'tgt_file', 'tmpdir')
+ 'mkisofs', '-r', '-V', 'VMEDIA_BOOT_ISO', '-l', '-e',
+ 'path/to/efiboot.img', '-no-emul-boot', '-o', 'tgt_file', 'tmpdir')
umount_mock.assert_called_once_with('mountdir')
@mock.patch.object(images, '_create_root_fs', autospec=True)
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index 87f64c263..f0eebb21d 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -860,13 +860,23 @@ class TestPXEUtils(db_base.DbTestCase):
expected_info = [{'opt_name': 'tag:!ipxe,59',
'opt_value': 'tftp://[ff80::1]/fake-bootfile',
'ip_version': ip_version},
+ {'opt_name': 'tag:!ipxe6,59',
+ 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'ip_version': ip_version},
{'opt_name': 'tag:ipxe,59',
'opt_value': expected_boot_script_url,
+ 'ip_version': ip_version},
+ {'opt_name': 'tag:ipxe6,59',
+ 'opt_value': expected_boot_script_url,
'ip_version': ip_version}]
+
elif ip_version == 4:
expected_info = [{'opt_name': 'tag:!ipxe,67',
'opt_value': boot_file,
'ip_version': ip_version},
+ {'opt_name': 'tag:!ipxe6,67',
+ 'opt_value': boot_file,
+ 'ip_version': ip_version},
{'opt_name': '66',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
@@ -876,6 +886,9 @@ class TestPXEUtils(db_base.DbTestCase):
{'opt_name': 'tag:ipxe,67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
+ {'opt_name': 'tag:ipxe6,67',
+ 'opt_value': expected_boot_script_url,
+ 'ip_version': ip_version},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
diff --git a/ironic/tests/unit/common/test_raid.py b/ironic/tests/unit/common/test_raid.py
index 40ae9f400..004af870d 100644
--- a/ironic/tests/unit/common/test_raid.py
+++ b/ironic/tests/unit/common/test_raid.py
@@ -161,6 +161,21 @@ class ValidateRaidConfigurationTestCase(base.TestCase):
class RaidPublicMethodsTestCase(db_base.DbTestCase):
+ def setUp(self):
+ super(RaidPublicMethodsTestCase, self).setUp()
+ self.target_raid_config = {
+ "logical_disks": [
+ {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
+ {'size_gb': 200, 'raid_level': 5}
+ ]}
+ n = {
+ 'boot_interface': 'pxe',
+ 'deploy_interface': 'direct',
+ 'raid_interface': 'agent',
+ 'target_raid_config': self.target_raid_config,
+ }
+ self.node = obj_utils.create_test_node(self.context, **n)
+
def test_get_logical_disk_properties(self):
with open(drivers_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
schema = json.load(raid_schema_fobj)
@@ -186,7 +201,7 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase):
def _test_update_raid_info(self, current_config,
capabilities=None):
- node = obj_utils.create_test_node(self.context)
+ node = self.node
if capabilities:
properties = node.properties
properties['capabilities'] = capabilities
@@ -239,3 +254,37 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase):
self.assertRaises(exception.InvalidParameterValue,
self._test_update_raid_info,
current_config)
+
+ def test_filter_target_raid_config(self):
+ result = raid.filter_target_raid_config(self.node)
+ self.assertEqual(self.node.target_raid_config, result)
+
+ def test_filter_target_raid_config_skip_root(self):
+ result = raid.filter_target_raid_config(
+ self.node, create_root_volume=False)
+ exp_target_raid_config = {
+ "logical_disks": [{'size_gb': 200, 'raid_level': 5}]}
+ self.assertEqual(exp_target_raid_config, result)
+
+ def test_filter_target_raid_config_skip_nonroot(self):
+ result = raid.filter_target_raid_config(
+ self.node, create_nonroot_volumes=False)
+ exp_target_raid_config = {
+ "logical_disks": [{'size_gb': 200,
+ 'raid_level': 0,
+ 'is_root_volume': True}]}
+ self.assertEqual(exp_target_raid_config, result)
+
+ def test_filter_target_raid_config_no_target_raid_config_after_skipping(
+ self):
+ self.assertRaises(exception.MissingParameterValue,
+ raid.filter_target_raid_config,
+ self.node, create_root_volume=False,
+ create_nonroot_volumes=False)
+
+ def test_filter_target_raid_config_empty_target_raid_config(self):
+ self.node.target_raid_config = {}
+ self.node.save()
+ self.assertRaises(exception.MissingParameterValue,
+ raid.filter_target_raid_config,
+ self.node)
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 0cfb7de9e..f34ee3487 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -625,6 +625,16 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertEqual(existing_driver, node.driver)
+ def test_update_node_from_invalid_driver(self):
+ existing_driver = 'fake-hardware'
+ wrong_driver = 'wrong-driver'
+ node = obj_utils.create_test_node(self.context, driver=wrong_driver)
+ node.driver = existing_driver
+ result = self.service.update_node(self.context, node)
+ self.assertEqual(existing_driver, result.driver)
+ node.refresh()
+ self.assertEqual(existing_driver, node.driver)
+
UpdateInterfaces = namedtuple('UpdateInterfaces', ('old', 'new'))
# NOTE(dtantsur): "old" interfaces here do not match the defaults, so that
# we can test resetting them.
diff --git a/ironic/tests/unit/conductor/test_task_manager.py b/ironic/tests/unit/conductor/test_task_manager.py
index 37255af50..1cb61441f 100644
--- a/ironic/tests/unit/conductor/test_task_manager.py
+++ b/ironic/tests/unit/conductor/test_task_manager.py
@@ -79,6 +79,24 @@ class TaskManagerTestCase(db_base.DbTestCase):
release_mock.assert_called_once_with(self.context, self.host,
self.node.id)
+ def test_no_driver(self, get_voltgt_mock, get_volconn_mock,
+ get_portgroups_mock, get_ports_mock,
+ build_driver_mock, reserve_mock, release_mock,
+ node_get_mock):
+ reserve_mock.return_value = self.node
+ with task_manager.TaskManager(self.context, 'fake-node-id',
+ load_driver=False) as task:
+ self.assertEqual(self.context, task.context)
+ self.assertEqual(self.node, task.node)
+ self.assertEqual(get_ports_mock.return_value, task.ports)
+ self.assertEqual(get_portgroups_mock.return_value, task.portgroups)
+ self.assertEqual(get_volconn_mock.return_value,
+ task.volume_connectors)
+ self.assertEqual(get_voltgt_mock.return_value, task.volume_targets)
+ self.assertIsNone(task.driver)
+ self.assertFalse(task.shared)
+ self.assertFalse(build_driver_mock.called)
+
def test_excl_nested_acquire(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
@@ -159,6 +177,28 @@ class TaskManagerTestCase(db_base.DbTestCase):
reserve_mock.assert_has_calls(expected_calls)
self.assertEqual(2, reserve_mock.call_count)
+ def test_excl_lock_exception_no_retries(
+ self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
+ get_ports_mock, build_driver_mock,
+ reserve_mock, release_mock, node_get_mock):
+ retry_attempts = 3
+ self.config(node_locked_retry_attempts=retry_attempts,
+ group='conductor')
+
+ # Fail on the first lock attempt, succeed on the second.
+ reserve_mock.side_effect = [exception.NodeLocked(node='foo',
+ host='foo'),
+ self.node]
+
+ self.assertRaises(exception.NodeLocked,
+ task_manager.TaskManager,
+ self.context,
+ 'fake-node-id',
+ retry=False)
+
+ reserve_mock.assert_called_once_with(self.context, self.host,
+ 'fake-node-id')
+
def test_excl_lock_reserve_exception(
self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
get_ports_mock, build_driver_mock,
diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
index 10b409f20..9593f2605 100644
--- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
@@ -791,6 +791,57 @@ class MigrationCheckersMixin(object):
col_names = [column.name for column in nodes.c]
self.assertIn('owner', col_names)
+ def _pre_upgrade_dd67b91a1981(self, engine):
+ data = {
+ 'node_uuid': uuidutils.generate_uuid(),
+ }
+
+ nodes = db_utils.get_table(engine, 'nodes')
+ nodes.insert().execute({'uuid': data['node_uuid']})
+
+ return data
+
+ def _check_dd67b91a1981(self, engine, data):
+ nodes = db_utils.get_table(engine, 'nodes')
+ col_names = [column.name for column in nodes.c]
+ self.assertIn('allocation_id', col_names)
+
+ node = nodes.select(
+ nodes.c.uuid == data['node_uuid']).execute().first()
+ self.assertIsNone(node['allocation_id'])
+
+ allocations = db_utils.get_table(engine, 'allocations')
+ col_names = [column.name for column in allocations.c]
+ expected_names = ['id', 'uuid', 'node_id', 'created_at', 'updated_at',
+ 'name', 'version', 'state', 'last_error',
+ 'resource_class', 'traits', 'candidate_nodes',
+ 'extra', 'conductor_affinity']
+ self.assertEqual(sorted(expected_names), sorted(col_names))
+ self.assertIsInstance(allocations.c.created_at.type,
+ sqlalchemy.types.DateTime)
+ self.assertIsInstance(allocations.c.updated_at.type,
+ sqlalchemy.types.DateTime)
+ self.assertIsInstance(allocations.c.id.type,
+ sqlalchemy.types.Integer)
+ self.assertIsInstance(allocations.c.uuid.type,
+ sqlalchemy.types.String)
+ self.assertIsInstance(allocations.c.node_id.type,
+ sqlalchemy.types.Integer)
+ self.assertIsInstance(allocations.c.state.type,
+ sqlalchemy.types.String)
+ self.assertIsInstance(allocations.c.last_error.type,
+ sqlalchemy.types.TEXT)
+ self.assertIsInstance(allocations.c.resource_class.type,
+ sqlalchemy.types.String)
+ self.assertIsInstance(allocations.c.traits.type,
+ sqlalchemy.types.TEXT)
+ self.assertIsInstance(allocations.c.candidate_nodes.type,
+ sqlalchemy.types.TEXT)
+ self.assertIsInstance(allocations.c.extra.type,
+ sqlalchemy.types.TEXT)
+ self.assertIsInstance(allocations.c.conductor_affinity.type,
+ sqlalchemy.types.Integer)
+
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('head')
diff --git a/ironic/tests/unit/db/test_allocations.py b/ironic/tests/unit/db/test_allocations.py
new file mode 100644
index 000000000..8809c7a76
--- /dev/null
+++ b/ironic/tests/unit/db/test_allocations.py
@@ -0,0 +1,234 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for manipulating allocations via the DB API"""
+
+from oslo_utils import uuidutils
+
+from ironic.common import exception
+from ironic.tests.unit.db import base
+from ironic.tests.unit.db import utils as db_utils
+
+
+class AllocationsTestCase(base.DbTestCase):
+
+ def setUp(self):
+ super(AllocationsTestCase, self).setUp()
+ self.node = db_utils.create_test_node()
+ self.allocation = db_utils.create_test_allocation(name='host1')
+
+ def _create_test_allocation_range(self, count, **kw):
+ """Create the specified number of test allocation entries in DB
+
+ It uses create_test_allocation method. And returns List of Allocation
+ DB objects.
+
+ :param count: Specifies the number of allocations to be created
+ :returns: List of Allocation DB objects
+
+ """
+ return [db_utils.create_test_allocation(uuid=uuidutils.generate_uuid(),
+ name='allocation' + str(i),
+ **kw).uuid
+ for i in range(count)]
+
+ def test_get_allocation_by_id(self):
+ res = self.dbapi.get_allocation_by_id(self.allocation.id)
+ self.assertEqual(self.allocation.uuid, res.uuid)
+
+ def test_get_allocation_by_id_that_does_not_exist(self):
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_id, 99)
+
+ def test_get_allocation_by_uuid(self):
+ res = self.dbapi.get_allocation_by_uuid(self.allocation.uuid)
+ self.assertEqual(self.allocation.id, res.id)
+
+ def test_get_allocation_by_uuid_that_does_not_exist(self):
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_uuid,
+ 'EEEEEEEE-EEEE-EEEE-EEEE-EEEEEEEEEEEE')
+
+ def test_get_allocation_by_name(self):
+ res = self.dbapi.get_allocation_by_name(self.allocation.name)
+ self.assertEqual(self.allocation.id, res.id)
+
+ def test_get_allocation_by_name_that_does_not_exist(self):
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_name, 'testfail')
+
+ def test_get_allocation_list(self):
+ uuids = self._create_test_allocation_range(6)
+ # Also add the uuid for the allocation created in setUp()
+ uuids.append(self.allocation.uuid)
+
+ res = self.dbapi.get_allocation_list()
+ self.assertEqual(set(uuids), {r.uuid for r in res})
+
+ def test_get_allocation_list_sorted(self):
+ uuids = self._create_test_allocation_range(6)
+ # Also add the uuid for the allocation created in setUp()
+ uuids.append(self.allocation.uuid)
+
+ res = self.dbapi.get_allocation_list(sort_key='uuid')
+ res_uuids = [r.uuid for r in res]
+ self.assertEqual(sorted(uuids), res_uuids)
+
+ def test_get_allocation_list_filter_by_state(self):
+ self._create_test_allocation_range(6, state='error')
+
+ res = self.dbapi.get_allocation_list(filters={'state': 'allocating'})
+ self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+ res = self.dbapi.get_allocation_list(filters={'state': 'error'})
+ self.assertEqual(6, len(res))
+
+ def test_get_allocation_list_filter_by_node(self):
+ self._create_test_allocation_range(6)
+ self.dbapi.update_allocation(self.allocation.id,
+ {'node_id': self.node.id})
+
+ res = self.dbapi.get_allocation_list(
+ filters={'node_uuid': self.node.uuid})
+ self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+ def test_get_allocation_list_filter_by_rsc(self):
+ self._create_test_allocation_range(6)
+ self.dbapi.update_allocation(self.allocation.id,
+ {'resource_class': 'very-large'})
+
+ res = self.dbapi.get_allocation_list(
+ filters={'resource_class': 'very-large'})
+ self.assertEqual([self.allocation.uuid], [r.uuid for r in res])
+
+ def test_get_allocation_list_invalid_fields(self):
+ self.assertRaises(exception.InvalidParameterValue,
+ self.dbapi.get_allocation_list, sort_key='foo')
+ self.assertRaises(ValueError,
+ self.dbapi.get_allocation_list,
+ filters={'foo': 42})
+
+ def test_destroy_allocation(self):
+ self.dbapi.destroy_allocation(self.allocation.id)
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_id, self.allocation.id)
+
+ def test_destroy_allocation_with_node(self):
+ self.dbapi.update_node(self.node.id,
+ {'allocation_id': self.allocation.id,
+ 'instance_uuid': uuidutils.generate_uuid(),
+ 'instance_info': {'traits': ['foo']}})
+ self.dbapi.destroy_allocation(self.allocation.id)
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_id, self.allocation.id)
+ node = self.dbapi.get_node_by_id(self.node.id)
+ self.assertIsNone(node.allocation_id)
+ self.assertIsNone(node.instance_uuid)
+ # NOTE(dtantsur): currently we do not clean up instance_info contents
+ # on deallocation. It may be changed in the future.
+ self.assertEqual(node.instance_info, {'traits': ['foo']})
+
+ def test_destroy_allocation_that_does_not_exist(self):
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.destroy_allocation, 99)
+
+ def test_destroy_allocation_uuid(self):
+ self.dbapi.destroy_allocation(self.allocation.uuid)
+
+ def test_update_allocation(self):
+ old_name = self.allocation.name
+ new_name = 'newname'
+ self.assertNotEqual(old_name, new_name)
+ res = self.dbapi.update_allocation(self.allocation.id,
+ {'name': new_name})
+ self.assertEqual(new_name, res.name)
+
+ def test_update_allocation_uuid(self):
+ self.assertRaises(exception.InvalidParameterValue,
+ self.dbapi.update_allocation, self.allocation.id,
+ {'uuid': ''})
+
+ def test_update_allocation_not_found(self):
+ id_2 = 99
+ self.assertNotEqual(self.allocation.id, id_2)
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.update_allocation, id_2,
+ {'name': 'newname'})
+
+ def test_update_allocation_duplicated_name(self):
+ name1 = self.allocation.name
+ allocation2 = db_utils.create_test_allocation(
+ uuid=uuidutils.generate_uuid(), name='name2')
+ self.assertRaises(exception.AllocationDuplicateName,
+ self.dbapi.update_allocation, allocation2.id,
+ {'name': name1})
+
+ def test_update_allocation_with_node_id(self):
+ res = self.dbapi.update_allocation(self.allocation.id,
+ {'name': 'newname',
+ 'traits': ['foo'],
+ 'node_id': self.node.id})
+ self.assertEqual('newname', res.name)
+ self.assertEqual(['foo'], res.traits)
+ self.assertEqual(self.node.id, res.node_id)
+
+ node = self.dbapi.get_node_by_id(self.node.id)
+ self.assertEqual(res.id, node.allocation_id)
+ self.assertEqual(res.uuid, node.instance_uuid)
+ self.assertEqual(['foo'], node.instance_info['traits'])
+
+ def test_update_allocation_node_already_associated(self):
+ existing_uuid = uuidutils.generate_uuid()
+ self.dbapi.update_node(self.node.id, {'instance_uuid': existing_uuid})
+ self.assertRaises(exception.NodeAssociated,
+ self.dbapi.update_allocation, self.allocation.id,
+ {'node_id': self.node.id, 'traits': ['foo']})
+
+ # Make sure we do not see partial updates
+ allocation = self.dbapi.get_allocation_by_id(self.allocation.id)
+ self.assertEqual([], allocation.traits)
+ self.assertIsNone(allocation.node_id)
+
+ node = self.dbapi.get_node_by_id(self.node.id)
+ self.assertIsNone(node.allocation_id)
+ self.assertEqual(existing_uuid, node.instance_uuid)
+ self.assertNotIn('traits', node.instance_info)
+
+ def test_update_allocation_associated_with_another_node(self):
+ db_utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ allocation_id=self.allocation.id,
+ instance_uuid=self.allocation.uuid)
+
+ self.assertRaises(exception.InstanceAssociated,
+ self.dbapi.update_allocation, self.allocation.id,
+ {'node_id': self.node.id, 'traits': ['foo']})
+
+ # Make sure we do not see partial updates
+ allocation = self.dbapi.get_allocation_by_id(self.allocation.id)
+ self.assertEqual([], allocation.traits)
+ self.assertIsNone(allocation.node_id)
+
+ node = self.dbapi.get_node_by_id(self.node.id)
+ self.assertIsNone(node.allocation_id)
+ self.assertIsNone(node.instance_uuid)
+ self.assertNotIn('traits', node.instance_info)
+
+ def test_create_allocation_duplicated_name(self):
+ self.assertRaises(exception.AllocationDuplicateName,
+ db_utils.create_test_allocation,
+ uuid=uuidutils.generate_uuid(),
+ name=self.allocation.name)
+
+ def test_create_allocation_duplicated_uuid(self):
+ self.assertRaises(exception.AllocationAlreadyExists,
+ db_utils.create_test_allocation,
+ uuid=self.allocation.uuid)
diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py
index 45b770370..f92baa7b1 100644
--- a/ironic/tests/unit/db/test_nodes.py
+++ b/ironic/tests/unit/db/test_nodes.py
@@ -302,7 +302,8 @@ class DbNodeTestCase(base.DbTestCase):
maintenance=True,
fault='boom',
resource_class='foo',
- conductor_group='group1')
+ conductor_group='group1',
+ power_state='power on')
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
@@ -355,6 +356,18 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
+ uuids = [uuidutils.generate_uuid(),
+ node1.uuid,
+ uuidutils.generate_uuid()]
+ res = self.dbapi.get_node_list(filters={'uuid_in': uuids})
+ self.assertEqual([node1.id], [r.id for r in res])
+
+ res = self.dbapi.get_node_list(filters={'with_power_state': True})
+ self.assertEqual([node2.id], [r.id for r in res])
+
+ res = self.dbapi.get_node_list(filters={'with_power_state': False})
+ self.assertEqual([node1.id], [r.id for r in res])
+
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
@@ -519,6 +532,15 @@ class DbNodeTestCase(base.DbTestCase):
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
+ def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self):
+ node = utils.create_test_node()
+
+ allocation = utils.create_test_allocation(node_id=node.id)
+
+ self.dbapi.destroy_node(node.uuid)
+ self.assertRaises(exception.AllocationNotFound,
+ self.dbapi.get_allocation_by_id, allocation.id)
+
def test_update_node(self):
node = utils.create_test_node()
diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py
index a0bc1fb38..f3f838000 100644
--- a/ironic/tests/unit/db/utils.py
+++ b/ironic/tests/unit/db/utils.py
@@ -16,10 +16,12 @@
from oslo_utils import timeutils
+from oslo_utils import uuidutils
from ironic.common import states
from ironic.db import api as db_api
from ironic.drivers import base as drivers_base
+from ironic.objects import allocation
from ironic.objects import bios
from ironic.objects import chassis
from ironic.objects import conductor
@@ -219,6 +221,7 @@ def get_test_node(**kw):
'protected_reason': kw.get('protected_reason', None),
'conductor': kw.get('conductor'),
'owner': kw.get('owner', None),
+ 'allocation_id': kw.get('allocation_id'),
}
for iface in drivers_base.ALL_INTERFACES:
@@ -588,3 +591,30 @@ def get_test_bios_setting_setting_list():
{'name': 'hyperthread', 'value': 'enabled'},
{'name': 'numlock', 'value': 'off'}
]
+
+
+def get_test_allocation(**kw):
+ return {
+ 'candidate_nodes': kw.get('candidate_nodes', []),
+ 'conductor_affinity': kw.get('conductor_affinity'),
+ 'created_at': kw.get('created_at'),
+ 'extra': kw.get('extra', {}),
+ 'id': kw.get('id', 42),
+ 'last_error': kw.get('last_error'),
+ 'name': kw.get('name'),
+ 'node_id': kw.get('node_id'),
+ 'resource_class': kw.get('resource_class', 'baremetal'),
+ 'state': kw.get('state', 'allocating'),
+ 'traits': kw.get('traits', []),
+ 'updated_at': kw.get('updated_at'),
+ 'uuid': kw.get('uuid', uuidutils.generate_uuid()),
+ 'version': kw.get('version', allocation.Allocation.VERSION),
+ }
+
+
+def create_test_allocation(**kw):
+    alloc = get_test_allocation(**kw)
+    if 'id' not in kw:
+        del alloc['id']
+    dbapi = db_api.get_instance()
+    return dbapi.create_allocation(alloc)
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_raid.py b/ironic/tests/unit/drivers/modules/ilo/test_raid.py
new file mode 100644
index 000000000..34b859023
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/ilo/test_raid.py
@@ -0,0 +1,342 @@
+# Copyright 2018 Hewlett Packard Enterprise Development LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test class for Raid Interface used by iLO5."""
+
+import mock
+from oslo_utils import importutils
+
+from ironic.common import exception
+from ironic.common import raid
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.conductor import utils as manager_utils
+from ironic.drivers.modules import deploy_utils
+from ironic.drivers.modules.ilo import common as ilo_common
+from ironic.drivers.modules.ilo import raid as ilo_raid
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.db import utils as db_utils
+from ironic.tests.unit.objects import utils as obj_utils
+
+ilo_error = importutils.try_import('proliantutils.exception')
+
+INFO_DICT = db_utils.get_test_ilo_info()
+
+
+class Ilo5RAIDTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(Ilo5RAIDTestCase, self).setUp()
+ self.driver = mock.Mock(raid=ilo_raid.Ilo5RAID())
+ self.target_raid_config = {
+ "logical_disks": [
+ {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
+ {'size_gb': 200, 'raid_level': 5}
+ ]}
+ self.clean_step = {'step': 'create_configuration',
+ 'interface': 'raid'}
+ n = {
+ 'driver': 'ilo5',
+ 'driver_info': INFO_DICT,
+ 'target_raid_config': self.target_raid_config,
+ 'clean_step': self.clean_step,
+ }
+ self.config(enabled_hardware_types=['ilo5'],
+ enabled_boot_interfaces=['ilo-virtual-media'],
+ enabled_console_interfaces=['ilo'],
+ enabled_deploy_interfaces=['iscsi'],
+ enabled_inspect_interfaces=['ilo'],
+ enabled_management_interfaces=['ilo'],
+ enabled_power_interfaces=['ilo'],
+ enabled_raid_interfaces=['ilo5'])
+ self.node = obj_utils.create_test_node(self.context, **n)
+
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test__prepare_for_read_raid_create_raid(
+ self, mock_reboot, mock_build_opt):
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ mock_build_opt.return_value = []
+ task.driver.raid._prepare_for_read_raid(task, 'create_raid')
+ self.assertTrue(
+ task.node.driver_internal_info.get(
+ 'ilo_raid_create_in_progress'))
+ self.assertTrue(
+ task.node.driver_internal_info.get(
+ 'cleaning_reboot'))
+ self.assertFalse(
+ task.node.driver_internal_info.get(
+ 'skip_current_clean_step'))
+ mock_reboot.assert_called_once_with(task, states.REBOOT)
+
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test__prepare_for_read_raid_delete_raid(
+ self, mock_reboot, mock_build_opt):
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ mock_build_opt.return_value = []
+ task.driver.raid._prepare_for_read_raid(task, 'delete_raid')
+ self.assertTrue(
+ task.node.driver_internal_info.get(
+ 'ilo_raid_delete_in_progress'))
+ self.assertTrue(
+ task.node.driver_internal_info.get(
+ 'cleaning_reboot'))
+            self.assertFalse(
+                task.node.driver_internal_info.get(
+                    'skip_current_clean_step'))
+ mock_reboot.assert_called_once_with(task, states.REBOOT)
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid')
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration(
+ self, ilo_mock, filter_target_raid_config_mock, prepare_raid_mock):
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ filter_target_raid_config_mock.return_value = (
+ self.target_raid_config)
+ result = task.driver.raid.create_configuration(task)
+ prepare_raid_mock.assert_called_once_with(task, 'create_raid')
+ (ilo_mock_object.create_raid_configuration.
+ assert_called_once_with(self.target_raid_config))
+ self.assertEqual(states.CLEANWAIT, result)
+
+ @mock.patch.object(raid, 'update_raid_info', autospec=True)
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_with_read_raid(
+ self, ilo_mock, filter_target_raid_config_mock, update_raid_mock):
+ raid_conf = {u'logical_disks':
+ [{u'size_gb': 89,
+ u'physical_disks': [u'5I:1:1'],
+ u'raid_level': u'0',
+ u'root_device_hint': {u'wwn': u'0x600508b1001c7e87'},
+ u'controller': u'Smart Array P822 in Slot 1',
+ u'volume_name': u'0006EB7BPDVTF0BRH5L0EAEDDA'}]
+ }
+ ilo_mock_object = ilo_mock.return_value
+ self.node.driver_internal_info = {'ilo_raid_create_in_progress': True}
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ filter_target_raid_config_mock.return_value = (
+ self.target_raid_config)
+ ilo_mock_object.read_raid_configuration.return_value = raid_conf
+ task.driver.raid.create_configuration(task)
+ update_raid_mock.assert_called_once_with(task.node, raid_conf)
+ self.assertNotIn('ilo_raid_create_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_with_read_raid_failed(
+ self, ilo_mock, filter_target_raid_config_mock):
+ raid_conf = {u'logical_disks': []}
+ self.node.driver_internal_info = {'ilo_raid_create_in_progress': True}
+ self.node.save()
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ filter_target_raid_config_mock.return_value = (
+ self.target_raid_config)
+ ilo_mock_object.read_raid_configuration.return_value = raid_conf
+ self.assertRaises(exception.NodeCleaningFailure,
+ task.driver.raid.create_configuration, task)
+ self.assertNotIn('ilo_raid_create_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_empty_target_raid_config(
+ self, ilo_mock, filter_target_raid_config_mock):
+ self.node.target_raid_config = {}
+ self.node.save()
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ msg = "Node %s has no target RAID configuration" % self.node.uuid
+ filter_target_raid_config_mock.side_effect = (
+ exception.MissingParameterValue(msg))
+ self.assertRaises(exception.MissingParameterValue,
+ task.driver.raid.create_configuration, task)
+ self.assertFalse(ilo_mock_object.create_raid_configuration.called)
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid')
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_skip_root(
+ self, ilo_mock, filter_target_raid_config_mock,
+ prepare_raid_mock):
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ exp_target_raid_config = {
+ "logical_disks": [
+ {'size_gb': 200, 'raid_level': 5}
+ ]}
+ filter_target_raid_config_mock.return_value = (
+ exp_target_raid_config)
+ result = task.driver.raid.create_configuration(
+ task, create_root_volume=False)
+ (ilo_mock_object.create_raid_configuration.
+ assert_called_once_with(exp_target_raid_config))
+ self.assertEqual(states.CLEANWAIT, result)
+ prepare_raid_mock.assert_called_once_with(task, 'create_raid')
+ self.assertEqual(
+ exp_target_raid_config,
+ task.node.driver_internal_info['target_raid_config'])
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid')
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_skip_non_root(
+ self, ilo_mock, filter_target_raid_config_mock, prepare_raid_mock):
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ exp_target_raid_config = {
+ "logical_disks": [
+ {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}
+ ]}
+ filter_target_raid_config_mock.return_value = (
+ exp_target_raid_config)
+ result = task.driver.raid.create_configuration(
+ task, create_nonroot_volumes=False)
+ (ilo_mock_object.create_raid_configuration.
+ assert_called_once_with(exp_target_raid_config))
+ prepare_raid_mock.assert_called_once_with(task, 'create_raid')
+ self.assertEqual(states.CLEANWAIT, result)
+ self.assertEqual(
+ exp_target_raid_config,
+ task.node.driver_internal_info['target_raid_config'])
+
+ @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_skip_root_skip_non_root(
+ self, ilo_mock, filter_target_raid_config_mock):
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ msg = "Node %s has no target RAID configuration" % self.node.uuid
+ filter_target_raid_config_mock.side_effect = (
+ exception.MissingParameterValue(msg))
+ self.assertRaises(
+ exception.MissingParameterValue,
+ task.driver.raid.create_configuration,
+ task, False, False)
+ self.assertFalse(ilo_mock_object.create_raid_configuration.called)
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_set_clean_failed')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_create_configuration_ilo_error(self, ilo_mock,
+ set_clean_failed_mock):
+ ilo_mock_object = ilo_mock.return_value
+ exc = ilo_error.IloError('error')
+ ilo_mock_object.create_raid_configuration.side_effect = exc
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.raid.create_configuration(
+ task, create_nonroot_volumes=False)
+ set_clean_failed_mock.assert_called_once_with(
+ task,
+ 'Failed to create raid configuration '
+ 'on node %s' % self.node.uuid, exc)
+ self.assertNotIn('ilo_raid_create_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_delete_configuration(self, ilo_mock, prepare_raid_mock):
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.delete_configuration(task)
+ self.assertEqual(states.CLEANWAIT, result)
+ ilo_mock_object.delete_raid_configuration.assert_called_once_with()
+ prepare_raid_mock.assert_called_once_with(task, 'delete_raid')
+
+ @mock.patch.object(ilo_raid.LOG, 'info', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_prepare_for_read_raid')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_delete_configuration_no_logical_drive(
+ self, ilo_mock, prepare_raid_mock, log_mock):
+ ilo_mock_object = ilo_mock.return_value
+ exc = ilo_error.IloLogicalDriveNotFoundError('No logical drive found')
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ ilo_mock_object.delete_raid_configuration.side_effect = exc
+ task.driver.raid.delete_configuration(task)
+ self.assertTrue(log_mock.called)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_delete_configuration_with_read_raid(self, ilo_mock):
+ raid_conf = {u'logical_disks': []}
+ self.node.driver_internal_info = {'ilo_raid_delete_in_progress': True}
+ self.node.save()
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ ilo_mock_object.read_raid_configuration.return_value = raid_conf
+ task.driver.raid.delete_configuration(task)
+            self.assertEqual({}, task.node.raid_config)
+ self.assertNotIn('ilo_raid_delete_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_delete_configuration_with_read_raid_failed(self, ilo_mock):
+ raid_conf = {u'logical_disks': [{'size_gb': 200,
+ 'raid_level': 0,
+ 'is_root_volume': True}]}
+ self.node.driver_internal_info = {'ilo_raid_delete_in_progress': True}
+ self.node.save()
+ ilo_mock_object = ilo_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ ilo_mock_object.read_raid_configuration.return_value = raid_conf
+ self.assertRaises(exception.NodeCleaningFailure,
+ task.driver.raid.delete_configuration, task)
+ self.assertNotIn('ilo_raid_delete_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(ilo_raid.Ilo5RAID, '_set_clean_failed')
+ @mock.patch.object(ilo_common, 'get_ilo_object', autospec=True)
+ def test_delete_configuration_ilo_error(self, ilo_mock,
+ set_clean_failed_mock):
+ ilo_mock_object = ilo_mock.return_value
+ exc = ilo_error.IloError('error')
+ ilo_mock_object.delete_raid_configuration.side_effect = exc
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.raid.delete_configuration(task)
+ ilo_mock_object.delete_raid_configuration.assert_called_once_with()
+ self.assertNotIn('ilo_raid_delete_in_progress',
+ task.node.driver_internal_info)
+ self.assertNotIn('cleaning_reboot',
+ task.node.driver_internal_info)
+ self.assertNotIn('skip_current_clean_step',
+ task.node.driver_internal_info)
+ set_clean_failed_mock.assert_called_once_with(
+ task,
+ 'Failed to delete raid configuration '
+ 'on node %s' % self.node.uuid, exc)
diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py
index 16c653c43..5e893a98c 100644
--- a/ironic/tests/unit/drivers/modules/test_agent.py
+++ b/ironic/tests/unit/drivers/modules/test_agent.py
@@ -1406,12 +1406,15 @@ class AgentRAIDTestCase(db_base.DbTestCase):
self.assertEqual(0, ret[0]['priority'])
self.assertEqual(0, ret[1]['priority'])
+ @mock.patch.object(raid, 'filter_target_raid_config')
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
- def test_create_configuration(self, execute_mock):
+ def test_create_configuration(self, execute_mock,
+ filter_target_raid_config_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
-
+ filter_target_raid_config_mock.return_value = (
+ self.target_raid_config)
return_value = task.driver.raid.create_configuration(task)
self.assertEqual(states.CLEANWAIT, return_value)
@@ -1420,65 +1423,76 @@ class AgentRAIDTestCase(db_base.DbTestCase):
task.node.driver_internal_info['target_raid_config'])
execute_mock.assert_called_once_with(task, self.clean_step)
+ @mock.patch.object(raid, 'filter_target_raid_config')
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
- def test_create_configuration_skip_root(self, execute_mock):
+ def test_create_configuration_skip_root(self, execute_mock,
+ filter_target_raid_config_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
-
- return_value = task.driver.raid.create_configuration(
- task, create_root_volume=False)
-
- self.assertEqual(states.CLEANWAIT, return_value)
- execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 5}
]}
+ filter_target_raid_config_mock.return_value = (
+ exp_target_raid_config)
+ return_value = task.driver.raid.create_configuration(
+ task, create_root_volume=False)
+ self.assertEqual(states.CLEANWAIT, return_value)
+ execute_mock.assert_called_once_with(task, self.clean_step)
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
+ @mock.patch.object(raid, 'filter_target_raid_config')
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
- def test_create_configuration_skip_nonroot(self, execute_mock):
+ def test_create_configuration_skip_nonroot(self, execute_mock,
+ filter_target_raid_config_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
-
- return_value = task.driver.raid.create_configuration(
- task, create_nonroot_volumes=False)
-
- self.assertEqual(states.CLEANWAIT, return_value)
- execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
]}
+ filter_target_raid_config_mock.return_value = (
+ exp_target_raid_config)
+ return_value = task.driver.raid.create_configuration(
+ task, create_nonroot_volumes=False)
+ self.assertEqual(states.CLEANWAIT, return_value)
+ execute_mock.assert_called_once_with(task, self.clean_step)
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
+ @mock.patch.object(raid, 'filter_target_raid_config')
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_no_target_raid_config_after_skipping(
- self, execute_mock):
+ self, execute_mock, filter_target_raid_config_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
+ msg = "Node %s has no target RAID configuration" % self.node.uuid
+ filter_target_raid_config_mock.side_effect = (
+ exception.MissingParameterValue(msg))
self.assertRaises(
exception.MissingParameterValue,
task.driver.raid.create_configuration,
task, create_root_volume=False,
create_nonroot_volumes=False)
-
self.assertFalse(execute_mock.called)
+ @mock.patch.object(raid, 'filter_target_raid_config')
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_empty_target_raid_config(
- self, execute_mock):
+ self, execute_mock, filter_target_raid_config_mock):
execute_mock.return_value = states.CLEANING
self.node.target_raid_config = {}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
+ msg = "Node %s has no target RAID configuration" % self.node.uuid
+ filter_target_raid_config_mock.side_effect = (
+ exception.MissingParameterValue(msg))
self.assertRaises(exception.MissingParameterValue,
task.driver.raid.create_configuration,
task)
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index d51885a69..4f6d82810 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -1113,6 +1113,14 @@ class GetPxeBootConfigTestCase(db_base.DbTestCase):
result = utils.get_pxe_config_template(self.node)
self.assertEqual('bios-template', result)
+ def test_get_pxe_config_template_per_node(self):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_info={"pxe_template": "fake-template"},
+ )
+ result = utils.get_pxe_config_template(node)
+ self.assertEqual('fake-template', result)
+
@mock.patch('time.sleep', lambda sec: None)
class OtherFunctionTestCase(db_base.DbTestCase):
diff --git a/ironic/tests/unit/drivers/test_ilo.py b/ironic/tests/unit/drivers/test_ilo.py
index 321ace576..ed5359fa0 100644
--- a/ironic/tests/unit/drivers/test_ilo.py
+++ b/ironic/tests/unit/drivers/test_ilo.py
@@ -19,6 +19,7 @@ Test class for iLO Drivers
from ironic.conductor import task_manager
from ironic.drivers import ilo
from ironic.drivers.modules import agent
+from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
@@ -165,3 +166,47 @@ class IloHardwareTestCase(db_base.DbTestCase):
agent.AgentDeploy)
self.assertIsInstance(task.driver.raid,
agent.AgentRAID)
+
+
+class Ilo5HardwareTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(Ilo5HardwareTestCase, self).setUp()
+ self.config(enabled_hardware_types=['ilo5'],
+ enabled_boot_interfaces=['ilo-virtual-media', 'ilo-pxe'],
+ enabled_console_interfaces=['ilo'],
+ enabled_deploy_interfaces=['iscsi', 'direct'],
+ enabled_inspect_interfaces=['ilo'],
+ enabled_management_interfaces=['ilo'],
+ enabled_power_interfaces=['ilo'],
+ enabled_raid_interfaces=['ilo5'],
+ enabled_rescue_interfaces=['no-rescue', 'agent'],
+ enabled_vendor_interfaces=['ilo', 'no-vendor'])
+
+ def test_default_interfaces(self):
+ node = obj_utils.create_test_node(self.context, driver='ilo5')
+ with task_manager.acquire(self.context, node.id) as task:
+ self.assertIsInstance(task.driver.raid, raid.Ilo5RAID)
+
+ def test_override_with_no_raid(self):
+ self.config(enabled_raid_interfaces=['no-raid', 'ilo5'])
+ node = obj_utils.create_test_node(self.context, driver='ilo5',
+ raid_interface='no-raid')
+ with task_manager.acquire(self.context, node.id) as task:
+ self.assertIsInstance(task.driver.raid, noop.NoRAID)
+ self.assertIsInstance(task.driver.boot,
+ ilo.boot.IloVirtualMediaBoot)
+ self.assertIsInstance(task.driver.console,
+ ilo.console.IloConsoleInterface)
+ self.assertIsInstance(task.driver.deploy,
+ iscsi_deploy.ISCSIDeploy)
+ self.assertIsInstance(task.driver.inspect,
+ ilo.inspect.IloInspect)
+ self.assertIsInstance(task.driver.management,
+ ilo.management.IloManagement)
+ self.assertIsInstance(task.driver.power,
+ ilo.power.IloPower)
+ self.assertIsInstance(task.driver.rescue,
+ noop.NoRescue)
+ self.assertIsInstance(task.driver.vendor,
+ ilo.vendor.VendorPassthru)
diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py b/ironic/tests/unit/drivers/third_party_driver_mocks.py
index 568a65deb..db82dfe7b 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mocks.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py
@@ -56,6 +56,8 @@ if not proliantutils:
sys.modules['proliantutils.utils'] = proliantutils.utils
proliantutils.utils.process_firmware_image = mock.MagicMock()
proliantutils.exception.IloError = type('IloError', (Exception,), {})
+ proliantutils.exception.IloLogicalDriveNotFoundError = (
+ type('IloLogicalDriveNotFoundError', (Exception,), {}))
command_exception = type('IloCommandNotSupportedError', (Exception,), {})
proliantutils.exception.IloCommandNotSupportedError = command_exception
proliantutils.exception.IloCommandNotSupportedInBiosError = type(
diff --git a/ironic/tests/unit/objects/test_allocation.py b/ironic/tests/unit/objects/test_allocation.py
new file mode 100644
index 000000000..baa7d8d71
--- /dev/null
+++ b/ironic/tests/unit/objects/test_allocation.py
@@ -0,0 +1,144 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import mock
+from testtools import matchers
+
+from ironic.common import exception
+from ironic import objects
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.db import utils as db_utils
+from ironic.tests.unit.objects import utils as obj_utils
+
+
+class TestAllocationObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
+
+ def setUp(self):
+ super(TestAllocationObject, self).setUp()
+ self.fake_allocation = db_utils.get_test_allocation(name='host1')
+
+ def test_get_by_id(self):
+ allocation_id = self.fake_allocation['id']
+ with mock.patch.object(self.dbapi, 'get_allocation_by_id',
+ autospec=True) as mock_get_allocation:
+ mock_get_allocation.return_value = self.fake_allocation
+
+ allocation = objects.Allocation.get(self.context, allocation_id)
+
+ mock_get_allocation.assert_called_once_with(allocation_id)
+ self.assertEqual(self.context, allocation._context)
+
+ def test_get_by_uuid(self):
+ uuid = self.fake_allocation['uuid']
+ with mock.patch.object(self.dbapi, 'get_allocation_by_uuid',
+ autospec=True) as mock_get_allocation:
+ mock_get_allocation.return_value = self.fake_allocation
+
+ allocation = objects.Allocation.get(self.context, uuid)
+
+ mock_get_allocation.assert_called_once_with(uuid)
+ self.assertEqual(self.context, allocation._context)
+
+ def test_get_by_name(self):
+ name = self.fake_allocation['name']
+ with mock.patch.object(self.dbapi, 'get_allocation_by_name',
+ autospec=True) as mock_get_allocation:
+ mock_get_allocation.return_value = self.fake_allocation
+ allocation = objects.Allocation.get(self.context, name)
+
+ mock_get_allocation.assert_called_once_with(name)
+ self.assertEqual(self.context, allocation._context)
+
+ def test_get_bad_id_and_uuid_and_name(self):
+ self.assertRaises(exception.InvalidIdentity,
+ objects.Allocation.get,
+ self.context,
+ 'not:a_name_or_uuid')
+
+ def test_create(self):
+ allocation = objects.Allocation(self.context, **self.fake_allocation)
+ with mock.patch.object(self.dbapi, 'create_allocation',
+ autospec=True) as mock_create_allocation:
+ mock_create_allocation.return_value = (
+ db_utils.get_test_allocation())
+
+ allocation.create()
+
+ args, _kwargs = mock_create_allocation.call_args
+ self.assertEqual(objects.Allocation.VERSION, args[0]['version'])
+
+ def test_save(self):
+ uuid = self.fake_allocation['uuid']
+ test_time = datetime.datetime(2000, 1, 1, 0, 0)
+ with mock.patch.object(self.dbapi, 'get_allocation_by_uuid',
+ autospec=True) as mock_get_allocation:
+ mock_get_allocation.return_value = self.fake_allocation
+ with mock.patch.object(self.dbapi, 'update_allocation',
+ autospec=True) as mock_update_allocation:
+ mock_update_allocation.return_value = (
+ db_utils.get_test_allocation(name='newname',
+ updated_at=test_time))
+ p = objects.Allocation.get_by_uuid(self.context, uuid)
+ p.name = 'newname'
+ p.save()
+
+ mock_get_allocation.assert_called_once_with(uuid)
+ mock_update_allocation.assert_called_once_with(
+ uuid, {'version': objects.Allocation.VERSION,
+ 'name': 'newname'})
+ self.assertEqual(self.context, p._context)
+ res_updated_at = (p.updated_at).replace(tzinfo=None)
+ self.assertEqual(test_time, res_updated_at)
+
+ def test_refresh(self):
+ uuid = self.fake_allocation['uuid']
+ returns = [self.fake_allocation,
+ db_utils.get_test_allocation(name='newname')]
+ expected = [mock.call(uuid), mock.call(uuid)]
+ with mock.patch.object(self.dbapi, 'get_allocation_by_uuid',
+ side_effect=returns,
+ autospec=True) as mock_get_allocation:
+ p = objects.Allocation.get_by_uuid(self.context, uuid)
+ self.assertEqual(self.fake_allocation['name'], p.name)
+ p.refresh()
+ self.assertEqual('newname', p.name)
+
+ self.assertEqual(expected, mock_get_allocation.call_args_list)
+ self.assertEqual(self.context, p._context)
+
+ def test_save_after_refresh(self):
+ # Ensure that it's possible to do object.save() after object.refresh()
+ db_allocation = db_utils.create_test_allocation()
+ p = objects.Allocation.get_by_uuid(self.context, db_allocation.uuid)
+ p_copy = objects.Allocation.get_by_uuid(self.context,
+ db_allocation.uuid)
+ p.name = 'newname'
+ p.save()
+ p_copy.refresh()
+ p_copy.name = 'newname2'
+ # Ensure this passes and an exception is not generated
+ p_copy.save()
+
+ def test_list(self):
+ with mock.patch.object(self.dbapi, 'get_allocation_list',
+ autospec=True) as mock_get_list:
+ mock_get_list.return_value = [self.fake_allocation]
+ allocations = objects.Allocation.list(self.context)
+ self.assertThat(allocations, matchers.HasLength(1))
+ self.assertIsInstance(allocations[0], objects.Allocation)
+ self.assertEqual(self.context, allocations[0]._context)
+
+ def test_payload_schemas(self):
+ self._check_payload_schemas(objects.allocation,
+ objects.Allocation.fields)
diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py
index 536e86ac0..6d25cad24 100644
--- a/ironic/tests/unit/objects/test_node.py
+++ b/ironic/tests/unit/objects/test_node.py
@@ -888,6 +888,67 @@ class TestConvertToVersion(db_base.DbTestCase):
self.assertIsNone(node.owner)
self.assertEqual({}, node.obj_get_changes())
+ def test_allocation_id_supported_missing(self):
+ # allocation_id not set, should be set to default.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+ delattr(node, 'allocation_id')
+ node.obj_reset_changes()
+ node._convert_to_version("1.31")
+ self.assertIsNone(node.allocation_id)
+ self.assertEqual({'allocation_id': None},
+ node.obj_get_changes())
+
+ def test_allocation_id_supported_set(self):
+ # allocation_id set, no change required.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.allocation_id = 42
+ node.obj_reset_changes()
+ node._convert_to_version("1.31")
+ self.assertEqual(42, node.allocation_id)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_allocation_id_unsupported_missing(self):
+ # allocation_id not set, no change required.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ delattr(node, 'allocation_id')
+ node.obj_reset_changes()
+ node._convert_to_version("1.30")
+ self.assertNotIn('allocation_id', node)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_allocation_id_unsupported_set_remove(self):
+ # allocation_id set, should be removed.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.allocation_id = 42
+ node.obj_reset_changes()
+ node._convert_to_version("1.30")
+ self.assertNotIn('allocation_id', node)
+ self.assertEqual({}, node.obj_get_changes())
+
+ def test_allocation_id_unsupported_set_no_remove_non_default(self):
+ # allocation_id set, should be set to default.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.allocation_id = 42
+ node.obj_reset_changes()
+ node._convert_to_version("1.30", False)
+ self.assertIsNone(node.allocation_id)
+ self.assertEqual({'allocation_id': None},
+ node.obj_get_changes())
+
+ def test_allocation_id_unsupported_set_no_remove_default(self):
+ # allocation_id set, no change required.
+ node = obj_utils.get_test_node(self.ctxt, **self.fake_node)
+
+ node.allocation_id = None
+ node.obj_reset_changes()
+ node._convert_to_version("1.30", False)
+ self.assertIsNone(node.allocation_id)
+ self.assertEqual({}, node.obj_get_changes())
+
class TestNodePayloads(db_base.DbTestCase):
diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py
index 4f409053c..bd8808506 100644
--- a/ironic/tests/unit/objects/test_objects.py
+++ b/ironic/tests/unit/objects/test_objects.py
@@ -677,7 +677,7 @@ class TestObject(_LocalTest, _TestObject):
# version bump. It is an MD5 hash of the object fields and remotable methods.
# The fingerprint values should only be changed if there is a version bump.
expected_object_fingerprints = {
- 'Node': '1.30-8313460d6ea5457a527cd3d85e5ee3d8',
+ 'Node': '1.31-1b77c11e94f971a71c76f5f44fb5b3f4',
'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6',
'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905',
'Port': '1.8-898a47921f4a1f53fcdddd4eeb179e0b',
@@ -714,6 +714,9 @@ expected_object_fingerprints = {
'TraitList': '1.0-33a2e1bb91ad4082f9f63429b77c1244',
'BIOSSetting': '1.0-fd4a791dc2139a7cc21cefbbaedfd9e7',
'BIOSSettingList': '1.0-33a2e1bb91ad4082f9f63429b77c1244',
+ 'Allocation': '1.0-25ebf609743cd3f332a4f80fcb818102',
+ 'AllocationCRUDNotification': '1.0-59acc533c11d306f149846f922739c15',
+ 'AllocationCRUDPayload': '1.0-a82389d019f37cfe54b50049f73911b3',
}