summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/source/install/enrollment.rst26
-rw-r--r--ironic/api/controllers/v1/__init__.py146
-rw-r--r--ironic/api/controllers/v1/node.py3
-rw-r--r--ironic/api/controllers/v1/ramdisk.py6
-rw-r--r--ironic/common/image_service.py35
-rw-r--r--ironic/common/kickstart_utils.py4
-rw-r--r--ironic/common/molds.py6
-rw-r--r--ironic/common/rpc_service.py18
-rw-r--r--ironic/conductor/base_manager.py17
-rw-r--r--ironic/conductor/inspection.py108
-rw-r--r--ironic/conductor/manager.py83
-rw-r--r--ironic/conductor/utils.py5
-rw-r--r--ironic/conf/conductor.py8
-rw-r--r--ironic/conf/default.py5
-rw-r--r--ironic/conf/inventory.py15
-rw-r--r--ironic/db/sqlalchemy/api.py6
-rw-r--r--ironic/drivers/modules/ansible/playbooks/library/stream_url.py3
-rw-r--r--ironic/drivers/modules/ilo/boot.py12
-rw-r--r--ironic/drivers/modules/inspect_utils.py116
-rw-r--r--ironic/drivers/modules/inspector/interface.py14
-rw-r--r--ironic/tests/base.py15
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_node.py44
-rw-r--r--ironic/tests/unit/common/test_image_service.py107
-rw-r--r--ironic/tests/unit/common/test_kickstart_utils.py3
-rw-r--r--ironic/tests/unit/common/test_molds.py35
-rw-r--r--ironic/tests/unit/common/test_rpc_service.py50
-rw-r--r--ironic/tests/unit/conductor/test_cleaning.py30
-rw-r--r--ironic/tests/unit/conductor/test_inspection.py118
-rw-r--r--ironic/tests/unit/conductor/test_manager.py119
-rw-r--r--ironic/tests/unit/conductor/test_utils.py11
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_boot.py78
-rw-r--r--ironic/tests/unit/drivers/modules/inspector/test_interface.py12
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspect_utils.py133
-rw-r--r--ironic/tests/unit/objects/utils.py12
-rw-r--r--playbooks/metal3-ci/run.yaml10
-rw-r--r--releasenotes/notes/Cleanfail-power-off-13b5fdcc2727866a.yaml8
-rw-r--r--releasenotes/notes/cross-link-1ffd1a4958f14fd7.yaml5
-rw-r--r--releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml5
-rw-r--r--releasenotes/notes/graceful_shutdown_wait-9a62627714b86726.yaml15
-rwxr-xr-xtools/test-setup.sh2
-rw-r--r--tox.ini2
-rw-r--r--zuul.d/ironic-jobs.yaml1
42 files changed, 766 insertions, 685 deletions
diff --git a/doc/source/install/enrollment.rst b/doc/source/install/enrollment.rst
index 2a4f23a9a..97e2d33d4 100644
--- a/doc/source/install/enrollment.rst
+++ b/doc/source/install/enrollment.rst
@@ -81,15 +81,9 @@ affected, since the initial provision state is still ``available``.
However, using API version 1.11 or above may break existing automation tooling
with respect to node creation.
-The default API version used by (the most recent) python-ironicclient is 1.9,
-but it may change in the future and should not be relied on.
-
-In the examples below we will use version 1.11 of the Bare metal API.
-This gives us the following advantages:
-
-* Explicit power credentials validation before leaving the ``enroll`` state.
-* Running node cleaning before entering the ``available`` state.
-* Not exposing half-configured nodes to the scheduler.
+The ``openstack baremetal`` command line tool tries to negotiate the most
+recent supported version, which in virtually all cases will be newer than
+1.11.
To set the API version for all commands, you can set the environment variable
``IRONIC_API_VERSION``. For the OpenStackClient baremetal plugin, set
@@ -118,7 +112,6 @@ and may be combined if desired.
.. code-block:: console
- $ export OS_BAREMETAL_API_VERSION=1.11
$ baremetal node create --driver ipmi
+--------------+--------------------------------------+
| Property | Value |
@@ -423,12 +416,13 @@ Validating node information
Making node available for deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In order for nodes to be available for deploying workloads on them, nodes
-must be in the ``available`` provision state. To do this, nodes
-created with API version 1.11 and above must be moved from the ``enroll`` state
-to the ``manageable`` state and then to the ``available`` state.
-This section can be safely skipped, if API version 1.10 or earlier is used
-(which is the case by default).
+In order for nodes to be available for deploying workloads on them, nodes must
+be in the ``available`` provision state. To do this, nodes must be moved from
+the ``enroll`` state to the ``manageable`` state and then to the ``available``
+state.
+
+.. note::
+ This section can be skipped, if API version 1.10 or earlier is used.
After creating a node and before moving it from its initial provision state of
``enroll``, basic power and port information needs to be configured on the node.
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index 9bd9af985..c352761c7 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -59,6 +59,27 @@ def max_version():
versions.min_version_string(), versions.max_version_string())
+def make_controller_links(name):
+ return [
+ link.make_link('self', api.request.public_url, name, ''),
+ link.make_link('bookmark', api.request.public_url, name, '',
+ bookmark=True)
+ ]
+
+
+VERSIONED_CONTROLLERS = {
+ 'portgroups': utils.allow_portgroups,
+ 'volume': utils.allow_volume,
+ 'lookup': utils.allow_ramdisk_endpoints,
+ 'heartbeat': utils.allow_ramdisk_endpoints,
+ 'conductors': utils.allow_expose_conductors,
+ 'allocations': utils.allow_allocations,
+ 'events': utils.allow_expose_events,
+ 'deploy_templates': utils.allow_deploy_templates,
+ 'shards': utils.allow_shards_endpoint,
+}
+
+
def v1():
v1 = {
'id': "v1",
@@ -75,124 +96,15 @@ def v1():
'base': 'application/json',
'type': 'application/vnd.openstack.ironic.v1+json'
},
- 'chassis': [
- link.make_link('self', api.request.public_url,
- 'chassis', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'chassis', '',
- bookmark=True)
- ],
- 'nodes': [
- link.make_link('self', api.request.public_url,
- 'nodes', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'nodes', '',
- bookmark=True)
- ],
- 'ports': [
- link.make_link('self', api.request.public_url,
- 'ports', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'ports', '',
- bookmark=True)
- ],
- 'drivers': [
- link.make_link('self', api.request.public_url,
- 'drivers', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'drivers', '',
- bookmark=True)
- ],
- 'version': version.default_version()
+ 'chassis': make_controller_links('chassis'),
+ 'nodes': make_controller_links('nodes'),
+ 'ports': make_controller_links('ports'),
+ 'drivers': make_controller_links('drivers'),
+ 'version': version.default_version(),
}
- if utils.allow_portgroups():
- v1['portgroups'] = [
- link.make_link('self', api.request.public_url,
- 'portgroups', ''),
- link.make_link('bookmark', api.request.public_url,
- 'portgroups', '', bookmark=True)
- ]
- if utils.allow_volume():
- v1['volume'] = [
- link.make_link('self',
- api.request.public_url,
- 'volume', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'volume', '',
- bookmark=True)
- ]
- if utils.allow_ramdisk_endpoints():
- v1['lookup'] = [
- link.make_link('self', api.request.public_url,
- 'lookup', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'lookup', '',
- bookmark=True)
- ]
- v1['heartbeat'] = [
- link.make_link('self',
- api.request.public_url,
- 'heartbeat', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'heartbeat', '',
- bookmark=True)
- ]
- if utils.allow_expose_conductors():
- v1['conductors'] = [
- link.make_link('self',
- api.request.public_url,
- 'conductors', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'conductors', '',
- bookmark=True)
- ]
- if utils.allow_allocations():
- v1['allocations'] = [
- link.make_link('self',
- api.request.public_url,
- 'allocations', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'allocations', '',
- bookmark=True)
- ]
- if utils.allow_expose_events():
- v1['events'] = [
- link.make_link('self', api.request.public_url,
- 'events', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'events', '',
- bookmark=True)
- ]
- if utils.allow_deploy_templates():
- v1['deploy_templates'] = [
- link.make_link('self',
- api.request.public_url,
- 'deploy_templates', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'deploy_templates', '',
- bookmark=True)
- ]
- if utils.allow_shards_endpoint():
- v1['shards'] = [
- link.make_link('self',
- api.request.public_url,
- 'shards', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'shards', '',
- bookmark=True)
- ]
+ for link_name, check_func in VERSIONED_CONTROLLERS.items():
+ if check_func():
+ v1[link_name] = make_controller_links(link_name)
return v1
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 5bf0a4981..65adee544 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -1974,8 +1974,7 @@ class NodeInventoryController(rest.RestController):
"""
node = api_utils.check_node_policy_and_retrieve(
'baremetal:node:inventory:get', self.node_ident)
- return inspect_utils.get_introspection_data(node,
- api.request.context)
+ return inspect_utils.get_inspection_data(node, api.request.context)
class NodesController(rest.RestController):
diff --git a/ironic/api/controllers/v1/ramdisk.py b/ironic/api/controllers/v1/ramdisk.py
index 5feef5e02..b98eb7dc2 100644
--- a/ironic/api/controllers/v1/ramdisk.py
+++ b/ironic/api/controllers/v1/ramdisk.py
@@ -131,13 +131,17 @@ class LookupController(rest.RestController):
else:
node = objects.Node.get_by_port_addresses(
api.request.context, valid_addresses)
- except exception.NotFound:
+ except exception.NotFound as e:
# NOTE(dtantsur): we are reraising the same exception to make sure
# we don't disclose the difference between nodes that are not found
# at all and nodes in a wrong state by different error messages.
+ LOG.error('No node has been found during lookup: %s', e)
raise exception.NotFound()
if CONF.api.restrict_lookup and not self.lookup_allowed(node):
+ LOG.error('Lookup is not allowed for node %(node)s in the '
+ 'provision state %(state)s',
+ {'node': node.uuid, 'state': node.provision_state})
raise exception.NotFound()
if api_utils.allow_agent_token():
diff --git a/ironic/common/image_service.py b/ironic/common/image_service.py
index 5280ee6bc..4b95b5152 100644
--- a/ironic/common/image_service.py
+++ b/ironic/common/image_service.py
@@ -34,10 +34,6 @@ from ironic.common import utils
from ironic.conf import CONF
IMAGE_CHUNK_SIZE = 1024 * 1024 # 1mb
-# NOTE(kaifeng) Image will be truncated to 2GiB by sendfile,
-# we use a large chunk size here for a better performance
-# while keep the chunk size less than the size limit.
-SENDFILE_CHUNK_SIZE = 1024 * 1024 * 1024 # 1Gb
LOG = log.getLogger(__name__)
@@ -264,26 +260,23 @@ class FileImageService(BaseImageService):
"""
source_image_path = self.validate_href(image_href)
dest_image_path = image_file.name
- local_device = os.stat(dest_image_path).st_dev
try:
- # We should have read and write access to source file to create
- # hard link to it.
- if (local_device == os.stat(source_image_path).st_dev
- and os.access(source_image_path, os.R_OK | os.W_OK)):
- image_file.close()
- os.remove(dest_image_path)
+ image_file.close()
+ os.remove(dest_image_path)
+
+ try:
os.link(source_image_path, dest_image_path)
+ except OSError as exc:
+ LOG.debug('Could not create a link from %(src)s to %(dest)s, '
+ 'will copy the content instead. Error: %(exc)s.',
+ {'src': source_image_path, 'dest': dest_image_path,
+ 'exc': exc})
else:
- filesize = os.path.getsize(source_image_path)
- offset = 0
- with open(source_image_path, 'rb') as input_img:
- while offset < filesize:
- count = min(SENDFILE_CHUNK_SIZE, filesize - offset)
- nbytes_out = os.sendfile(image_file.fileno(),
- input_img.fileno(),
- offset,
- count)
- offset += nbytes_out
+ return
+
+ # NOTE(dtantsur): starting with Python 3.8, copyfile() uses
+ # efficient copying (i.e. sendfile) under the hood.
+ shutil.copyfile(source_image_path, dest_image_path)
except Exception as e:
raise exception.ImageDownloadFailed(image_href=image_href,
reason=str(e))
diff --git a/ironic/common/kickstart_utils.py b/ironic/common/kickstart_utils.py
index 433cf2390..4e02e2ea7 100644
--- a/ironic/common/kickstart_utils.py
+++ b/ironic/common/kickstart_utils.py
@@ -23,6 +23,7 @@ import pycdlib
import requests
from ironic.common import exception
+from ironic.conf import CONF
LOG = logging.getLogger(__name__)
@@ -107,7 +108,8 @@ def decode_and_extract_config_drive_iso(config_drive_iso_gz):
def _fetch_config_drive_from_url(url):
try:
- config_drive = requests.get(url).content
+ config_drive = requests.get(
+ url, timeout=CONF.webserver_connection_timeout).content
except requests.exceptions.RequestException as e:
raise exception.InstanceDeployFailure(
"Can't download the configdrive content from '%(url)s'. "
diff --git a/ironic/common/molds.py b/ironic/common/molds.py
index 234fcc6e3..a77e42a63 100644
--- a/ironic/common/molds.py
+++ b/ironic/common/molds.py
@@ -49,7 +49,8 @@ def save_configuration(task, url, data):
)
def _request(url, data, auth_header):
return requests.put(
- url, data=json.dumps(data, indent=2), headers=auth_header)
+ url, data=json.dumps(data, indent=2), headers=auth_header,
+ timeout=CONF.webserver_connection_timeout)
auth_header = _get_auth_header(task)
response = _request(url, data, auth_header)
@@ -76,7 +77,8 @@ def get_configuration(task, url):
reraise=True
)
def _request(url, auth_header):
- return requests.get(url, headers=auth_header)
+ return requests.get(url, headers=auth_header,
+ timeout=CONF.webserver_connection_timeout)
auth_header = _get_auth_header(task)
response = _request(url, auth_header)
diff --git a/ironic/common/rpc_service.py b/ironic/common/rpc_service.py
index cb0f23c98..a74f6bab3 100644
--- a/ironic/common/rpc_service.py
+++ b/ironic/common/rpc_service.py
@@ -100,7 +100,8 @@ class RPCService(service.Service):
seconds=CONF.hash_ring_reset_interval)
try:
- self.manager.del_host(deregister=self.deregister)
+ self.manager.del_host(deregister=self.deregister,
+ clear_node_reservations=False)
except Exception as e:
LOG.exception('Service error occurred when cleaning up '
'the RPC manager. Error: %s', e)
@@ -127,6 +128,21 @@ class RPCService(service.Service):
LOG.info('Stopped RPC server for service %(service)s on host '
'%(host)s.',
{'service': self.topic, 'host': self.host})
+
+ # Wait for reservation locks held by this conductor.
+ # The conductor process will end when:
+ # - All reservations for this conductor are released
+ # - CONF.graceful_shutdown_timeout has elapsed
+ # - The process manager (systemd, kubernetes) sends SIGKILL after the
+ # configured graceful period
+ graceful_time = initial_time + datetime.timedelta(
+ seconds=CONF.graceful_shutdown_timeout)
+ while (self.manager.has_reserved()
+ and graceful_time > timeutils.utcnow()):
+ LOG.info('Waiting for reserved nodes to clear on host %(host)s',
+ {'host': self.host})
+ time.sleep(1)
+
rpc.set_global_manager(None)
def _handle_signal(self, signo, frame):
diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py
index 5c2e4ea95..544411e1d 100644
--- a/ironic/conductor/base_manager.py
+++ b/ironic/conductor/base_manager.py
@@ -298,15 +298,17 @@ class BaseConductorManager(object):
# This is only used in tests currently. Delete it?
self._periodic_task_callables = periodic_task_callables
- def del_host(self, deregister=True):
+ def del_host(self, deregister=True, clear_node_reservations=True):
# Conductor deregistration fails if called on non-initialized
# conductor (e.g. when rpc server is unreachable).
if not hasattr(self, 'conductor'):
return
self._shutdown = True
self._keepalive_evt.set()
- # clear all locks held by this conductor before deregistering
- self.dbapi.clear_node_reservations_for_conductor(self.host)
+
+ if clear_node_reservations:
+ # clear all locks held by this conductor before deregistering
+ self.dbapi.clear_node_reservations_for_conductor(self.host)
if deregister:
try:
# Inform the cluster that this conductor is shutting down.
@@ -338,6 +340,15 @@ class BaseConductorManager(object):
"""Return a count of currently online conductors"""
return len(self.dbapi.get_online_conductors())
+ def has_reserved(self):
+ """Determines if this host currently has any reserved nodes
+
+ :returns: True if this host has reserved nodes
+ """
+ return bool(self.dbapi.get_nodeinfo_list(
+ filters={'reserved_by_any_of': [self.host]},
+ limit=1))
+
def _register_and_validate_hardware_interfaces(self, hardware_types):
"""Register and validate hardware interfaces for this conductor.
diff --git a/ironic/conductor/inspection.py b/ironic/conductor/inspection.py
new file mode 100644
index 000000000..53c76e99d
--- /dev/null
+++ b/ironic/conductor/inspection.py
@@ -0,0 +1,108 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Inspection implementation for the conductor."""
+
+from oslo_log import log
+from oslo_utils import excutils
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.conductor import utils
+
+LOG = log.getLogger(__name__)
+
+
+@task_manager.require_exclusive_lock
+def inspect_hardware(task):
+ """Initiates inspection.
+
+ :param task: a TaskManager instance with an exclusive lock
+ on its node.
+ :raises: HardwareInspectionFailure if driver doesn't
+ return the state as states.MANAGEABLE, states.INSPECTWAIT.
+
+ """
+ node = task.node
+
+ def handle_failure(e, log_func=LOG.error):
+ utils.node_history_record(task.node, event=e,
+ event_type=states.INTROSPECTION,
+ error=True, user=task.context.user_id)
+ task.process_event('fail')
+ log_func("Failed to inspect node %(node)s: %(err)s",
+ {'node': node.uuid, 'err': e})
+
+ # Inspection cannot start in fast-track mode, wipe token and URL.
+ utils.wipe_token_and_url(task)
+
+ try:
+ new_state = task.driver.inspect.inspect_hardware(task)
+ except exception.IronicException as e:
+ with excutils.save_and_reraise_exception():
+ error = str(e)
+ handle_failure(error)
+ except Exception as e:
+ error = (_('Unexpected exception of type %(type)s: %(msg)s') %
+ {'type': type(e).__name__, 'msg': e})
+ handle_failure(error, log_func=LOG.exception)
+ raise exception.HardwareInspectionFailure(error=error)
+
+ if new_state == states.MANAGEABLE:
+ task.process_event('done')
+ LOG.info('Successfully inspected node %(node)s',
+ {'node': node.uuid})
+ elif new_state == states.INSPECTWAIT:
+ task.process_event('wait')
+ LOG.info('Successfully started introspection on node %(node)s',
+ {'node': node.uuid})
+ else:
+ error = (_("During inspection, driver returned unexpected "
+ "state %(state)s") % {'state': new_state})
+ handle_failure(error)
+ raise exception.HardwareInspectionFailure(error=error)
+
+
+@task_manager.require_exclusive_lock
+def abort_inspection(task):
+ """Abort inspection for the node."""
+ node = task.node
+
+ try:
+ task.driver.inspect.abort(task)
+ except exception.UnsupportedDriverExtension:
+ with excutils.save_and_reraise_exception():
+ LOG.error('Inspect interface "%(intf)s" does not support abort '
+ 'operation for node %(node)s',
+ {'intf': node.inspect_interface, 'node': node.uuid})
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Error when aborting inspection of node %(node)s',
+ {'node': node.uuid})
+ error = _('Failed to abort inspection: %s') % e
+ utils.node_history_record(task.node, event=error,
+ event_type=states.INTROSPECTION,
+ error=True,
+ user=task.context.user_id)
+ node.save()
+
+ error = _('Inspection was aborted by request.')
+ utils.node_history_record(task.node, event=error,
+ event_type=states.INTROSPECTION,
+ error=True,
+ user=task.context.user_id)
+ utils.wipe_token_and_url(task)
+ task.process_event('abort')
+ LOG.info('Successfully aborted inspection of node %(node)s',
+ {'node': node.uuid})
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 74e3192cf..bbd2355bd 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -63,6 +63,7 @@ from ironic.conductor import allocations
from ironic.conductor import base_manager
from ironic.conductor import cleaning
from ironic.conductor import deployments
+from ironic.conductor import inspection
from ironic.conductor import notification_utils as notify_utils
from ironic.conductor import periodics
from ironic.conductor import steps as conductor_steps
@@ -1365,35 +1366,7 @@ class ConductorManager(base_manager.BaseConductorManager):
return
if node.provision_state == states.INSPECTWAIT:
- try:
- task.driver.inspect.abort(task)
- except exception.UnsupportedDriverExtension:
- with excutils.save_and_reraise_exception():
- intf_name = task.driver.inspect.__class__.__name__
- LOG.error('Inspect interface %(intf)s does not '
- 'support abort operation when aborting '
- 'inspection of node %(node)s',
- {'intf': intf_name, 'node': node.uuid})
- except Exception as e:
- with excutils.save_and_reraise_exception():
- LOG.exception('Error in aborting the inspection of '
- 'node %(node)s', {'node': node.uuid})
- error = _('Failed to abort inspection: %s') % e
- utils.node_history_record(task.node, event=error,
- event_type=states.INTROSPECTION,
- error=True,
- user=task.context.user_id)
- node.save()
- error = _('Inspection was aborted by request.')
- utils.node_history_record(task.node, event=error,
- event_type=states.INTROSPECTION,
- error=True,
- user=task.context.user_id)
- utils.wipe_token_and_url(task)
- task.process_event('abort')
- LOG.info('Successfully aborted inspection of node %(node)s',
- {'node': node.uuid})
- return
+ return inspection.abort_inspection(task)
@METRICS.timer('ConductorManager._sync_power_states')
@periodics.periodic(spacing=CONF.conductor.sync_power_state_interval,
@@ -3038,7 +3011,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
'inspect',
callback=self._spawn_worker,
- call_args=(_do_inspect_hardware, task),
+ call_args=(inspection.inspect_hardware, task),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
@@ -3858,53 +3831,3 @@ def do_sync_power_state(task, count):
task, old_power_state)
return count
-
-
-@task_manager.require_exclusive_lock
-def _do_inspect_hardware(task):
- """Initiates inspection.
-
- :param task: a TaskManager instance with an exclusive lock
- on its node.
- :raises: HardwareInspectionFailure if driver doesn't
- return the state as states.MANAGEABLE, states.INSPECTWAIT.
-
- """
- node = task.node
-
- def handle_failure(e, log_func=LOG.error):
- utils.node_history_record(task.node, event=e,
- event_type=states.INTROSPECTION,
- error=True, user=task.context.user_id)
- task.process_event('fail')
- log_func("Failed to inspect node %(node)s: %(err)s",
- {'node': node.uuid, 'err': e})
-
- # Inspection cannot start in fast-track mode, wipe token and URL.
- utils.wipe_token_and_url(task)
-
- try:
- new_state = task.driver.inspect.inspect_hardware(task)
- except exception.IronicException as e:
- with excutils.save_and_reraise_exception():
- error = str(e)
- handle_failure(error)
- except Exception as e:
- error = (_('Unexpected exception of type %(type)s: %(msg)s') %
- {'type': type(e).__name__, 'msg': e})
- handle_failure(error, log_func=LOG.exception)
- raise exception.HardwareInspectionFailure(error=error)
-
- if new_state == states.MANAGEABLE:
- task.process_event('done')
- LOG.info('Successfully inspected node %(node)s',
- {'node': node.uuid})
- elif new_state == states.INSPECTWAIT:
- task.process_event('wait')
- LOG.info('Successfully started introspection on node %(node)s',
- {'node': node.uuid})
- else:
- error = (_("During inspection, driver returned unexpected "
- "state %(state)s") % {'state': new_state})
- handle_failure(error)
- raise exception.HardwareInspectionFailure(error=error)
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index 2272c0df7..add9ee74d 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -499,6 +499,11 @@ def cleaning_error_handler(task, logmsg, errmsg=None, traceback=False,
# NOTE(dtantsur): avoid overwriting existing maintenance_reason
if not node.maintenance_reason and set_maintenance:
node.maintenance_reason = errmsg
+
+ if CONF.conductor.poweroff_in_cleanfail:
+ # NOTE(NobodyCam): Power off node in clean fail
+ node_power_action(task, states.POWER_OFF)
+
node.save()
if set_fail_state and node.provision_state != states.CLEANFAIL:
diff --git a/ironic/conf/conductor.py b/ironic/conf/conductor.py
index 653e30f56..2452fafe7 100644
--- a/ironic/conf/conductor.py
+++ b/ironic/conf/conductor.py
@@ -349,6 +349,14 @@ opts = [
'is a global setting applying to all requests this '
'conductor receives, regardless of access rights. '
'The concurrent clean limit cannot be disabled.')),
+
+ cfg.BoolOpt('poweroff_in_cleanfail',
+ default=False,
+ help=_('If True power off nodes in the ``clean failed`` '
+ 'state. Default False. Option may be unsafe '
+ 'when using Cleaning to perform '
+ 'hardware-transformative actions such as '
+ 'firmware upgrade.')),
]
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index c7aff69cc..5b40c1f31 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -426,8 +426,9 @@ webserver_opts = [
'Defaults to True.')),
cfg.IntOpt('webserver_connection_timeout',
default=60,
- help=_('Connection timeout when accessing remote web servers '
- 'with images.')),
+ help=_('Connection timeout when accessing/interacting with '
+ 'remote web servers with images or other artifacts '
+ 'being accessed.')),
]
diff --git a/ironic/conf/inventory.py b/ironic/conf/inventory.py
index 52f31bf60..47e88c6f9 100644
--- a/ironic/conf/inventory.py
+++ b/ironic/conf/inventory.py
@@ -17,16 +17,17 @@ from ironic.common.i18n import _
opts = [
cfg.StrOpt('data_backend',
- help=_('The storage backend for storing introspection data.'),
- choices=[('none', _('introspection data will not be stored')),
- ('database', _('introspection data stored in an SQL '
- 'database')),
- ('swift', _('introspection data stored in Swift'))],
+ help=_('The storage backend for storing inspection data.'),
+ choices=[
+ ('none', _('do not store inspection data')),
+ ('database', _('store in the service database')),
+ ('swift', _('store in the Object Storage (swift)')),
+ ],
default='database'),
cfg.StrOpt('swift_data_container',
default='introspection_data_container',
- help=_('The Swift introspection data container to store '
- 'the inventory data.')),
+ help=_('The Swift container prefix to store the inspection '
+ 'data (separately inventory and plugin data).')),
]
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 93a211fc3..31ec9647e 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -1379,12 +1379,14 @@ class Connection(api.Connection):
def list_hardware_type_interfaces(self, hardware_types):
with _session_for_read() as session:
- query = (session.query(models.ConductorHardwareInterfaces)
+ query = (session.query(models.ConductorHardwareInterfaces,
+ models.Conductor)
+ .join(models.Conductor)
.filter(models.ConductorHardwareInterfaces.hardware_type
.in_(hardware_types)))
query = _filter_active_conductors(query)
- return query.all()
+ return [row[0] for row in query]
@oslo_db_api.retry_on_deadlock
def register_conductor_hardware_interfaces(self, conductor_id, interfaces):
diff --git a/ironic/drivers/modules/ansible/playbooks/library/stream_url.py b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
index 0da3cc4dd..786b65013 100644
--- a/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
+++ b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
@@ -31,7 +31,8 @@ class StreamingDownloader(object):
else:
self.hasher = None
self.chunksize = chunksize
- resp = requests.get(url, stream=True, verify=verify, cert=certs)
+ resp = requests.get(url, stream=True, verify=verify, cert=certs,
+ timeout=30)
if resp.status_code != 200:
raise Exception('Invalid response code: %s' % resp.status_code)
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index e29852981..fe2cef02a 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -604,6 +604,12 @@ class IloPXEBoot(pxe.PXEBoot):
else:
# Volume boot in BIOS boot mode is handled using
# PXE boot interface
+ boot_option = deploy_utils.get_boot_option(task.node)
+ if boot_option == "kickstart":
+ if task.node.provision_state in (states.DEPLOYING,
+ states.RESCUING,
+ states.CLEANING):
+ prepare_node_for_deploy(task)
super(IloPXEBoot, self).prepare_instance(task)
@METRICS.timer('IloPXEBoot.clean_up_instance')
@@ -696,6 +702,12 @@ class IloiPXEBoot(ipxe.iPXEBoot):
else:
# Volume boot in BIOS boot mode is handled using
# PXE boot interface
+ boot_option = deploy_utils.get_boot_option(task.node)
+ if boot_option == "kickstart":
+ if task.node.provision_state in (states.DEPLOYING,
+ states.RESCUING,
+ states.CLEANING):
+ prepare_node_for_deploy(task)
super(IloiPXEBoot, self).prepare_instance(task)
@METRICS.timer('IloiPXEBoot.clean_up_instance')
diff --git a/ironic/drivers/modules/inspect_utils.py b/ironic/drivers/modules/inspect_utils.py
index 432345cf1..f87fcc82b 100644
--- a/ironic/drivers/modules/inspect_utils.py
+++ b/ironic/drivers/modules/inspect_utils.py
@@ -68,101 +68,88 @@ def create_ports_if_not_exist(task, macs=None):
def clean_up_swift_entries(task):
- """Delete swift entries containing introspection data.
+ """Delete swift entries containing inspection data.
Delete swift entries related to the node in task.node containing
- introspection data. The entries are
+ inspection data. The entries are
``inspector_data-<task.node.uuid>-inventory`` for hardware inventory and
- similar for ``-plugin`` containing the rest of the introspection data.
+ similar for ``-plugin`` containing the rest of the inspection data.
:param task: A TaskManager instance.
"""
if CONF.inventory.data_backend != 'swift':
return
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, task.node.uuid)
container = CONF.inventory.swift_data_container
- inventory_obj_name = swift_object_name + '-inventory'
- plugin_obj_name = swift_object_name + '-plugin'
+ inventory_obj_name = f'{_OBJECT_NAME_PREFIX}-{task.node.uuid}-inventory'
+ plugin_obj_name = f'{_OBJECT_NAME_PREFIX}-{task.node.uuid}-plugin'
try:
swift_api.delete_object(inventory_obj_name, container)
except swiftclient.exceptions.ClientException as e:
- if e.http_status == 404:
- # 404 -> entry did not exist - acceptable.
- pass
- else:
- LOG.error("Object %(obj)s related to node %(node)s "
- "failed to be deleted with expection: %(e)s",
+ if e.http_status != 404:
+ LOG.error("Object %(obj)s in container %(cont)s with inventory "
+ "for node %(node)s failed to be deleted: %(e)s",
{'obj': inventory_obj_name, 'node': task.node.uuid,
- 'e': e})
+ 'e': e, 'cont': container})
raise exception.SwiftObjectStillExists(obj=inventory_obj_name,
node=task.node.uuid)
try:
swift_api.delete_object(plugin_obj_name, container)
except swiftclient.exceptions.ClientException as e:
- if e.http_status == 404:
- # 404 -> entry did not exist - acceptable.
- pass
- else:
- LOG.error("Object %(obj)s related to node %(node)s "
- "failed to be deleted with exception: %(e)s",
+ if e.http_status != 404:
+ LOG.error("Object %(obj)s in container %(cont)s with plugin data "
+ "for node %(node)s failed to be deleted: %(e)s",
{'obj': plugin_obj_name, 'node': task.node.uuid,
- 'e': e})
+ 'e': e, 'cont': container})
raise exception.SwiftObjectStillExists(obj=plugin_obj_name,
node=task.node.uuid)
-def store_introspection_data(node, introspection_data, context):
- """Store introspection data.
+def store_inspection_data(node, inventory, plugin_data, context):
+ """Store inspection data.
- Store the introspection data for a node. Either to database
- or swift as configured.
+ Store the inspection data for a node. The storage is either the database
+ or the Object Storage API (swift/radosgw) as configured.
- :param node: the Ironic node that the introspection data is about
- :param introspection_data: the data to store
+ :param node: the Ironic node that the inspection data is about
+ :param inventory: the inventory to store
+ :param plugin_data: the plugin data (if any) to store
:param context: an admin context
"""
# If store_data == 'none', do not store the data
store_data = CONF.inventory.data_backend
if store_data == 'none':
- LOG.debug('Introspection data storage is disabled, the data will '
- 'not be saved for node %(node)s', {'node': node.uuid})
+ LOG.debug('Inspection data storage is disabled, the data will '
+ 'not be saved for node %s', node.uuid)
return
- inventory_data = introspection_data.pop("inventory")
- plugin_data = introspection_data
if store_data == 'database':
node_inventory.NodeInventory(
context,
node_id=node.id,
- inventory_data=inventory_data,
+ inventory_data=inventory,
plugin_data=plugin_data).create()
- LOG.info('Introspection data was stored in database for node '
- '%(node)s', {'node': node.uuid})
+ LOG.info('Inspection data was stored in database for node %s',
+ node.uuid)
if store_data == 'swift':
- swift_object_name = _store_introspection_data_in_swift(
+ swift_object_name = _store_inspection_data_in_swift(
node_uuid=node.uuid,
- inventory_data=inventory_data,
+ inventory_data=inventory,
plugin_data=plugin_data)
- LOG.info('Introspection data was stored for node %(node)s in Swift'
- ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
+ LOG.info('Inspection data was stored in Swift for node %(node)s: '
+ 'objects %(obj_name)s-inventory and %(obj_name)s-plugin',
{'node': node.uuid, 'obj_name': swift_object_name})
-def _node_inventory_convert(node_inventory):
- inventory_data = node_inventory['inventory_data']
- plugin_data = node_inventory['plugin_data']
- return {"inventory": inventory_data, "plugin_data": plugin_data}
-
-
-def get_introspection_data(node, context):
- """Get introspection data.
+def get_inspection_data(node, context):
+ """Get inspection data.
- Retrieve the introspection data for a node. Either from database
- or swift as configured.
+ Retrieve the inspection data for a node either from database
+ or the Object Storage API (swift/radosgw) as configured.
- :param node_id: the Ironic node that the required data is about
+ :param node: the Ironic node that the required data is about
:param context: an admin context
:returns: dictionary with ``inventory`` and ``plugin_data`` fields
+ :raises: NodeInventoryNotFound if no inventory has been saved
"""
store_data = CONF.inventory.data_backend
if store_data == 'none':
@@ -170,58 +157,57 @@ def get_introspection_data(node, context):
if store_data == 'database':
node_inventory = objects.NodeInventory.get_by_node_id(
context, node.id)
- return _node_inventory_convert(node_inventory)
+ return {"inventory": node_inventory.inventory_data,
+ "plugin_data": node_inventory.plugin_data}
if store_data == 'swift':
try:
- node_inventory = _get_introspection_data_from_swift(node.uuid)
+ return _get_inspection_data_from_swift(node.uuid)
except exception.SwiftObjectNotFoundError:
raise exception.NodeInventoryNotFound(node=node.uuid)
- return node_inventory
-def _store_introspection_data_in_swift(node_uuid, inventory_data, plugin_data):
- """Uploads introspection data to Swift.
+def _store_inspection_data_in_swift(node_uuid, inventory_data, plugin_data):
+ """Uploads inspection data to Swift.
:param data: data to store in Swift
:param node_id: ID of the Ironic node that the data came from
:returns: name of the Swift object that the data is stored in
"""
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
+ swift_object_name = f'{_OBJECT_NAME_PREFIX}-{node_uuid}'
container = CONF.inventory.swift_data_container
- swift_api.create_object_from_data(swift_object_name + '-inventory',
+ swift_api.create_object_from_data(f'{swift_object_name}-inventory',
inventory_data,
container)
- swift_api.create_object_from_data(swift_object_name + '-plugin',
+ swift_api.create_object_from_data(f'{swift_object_name}-plugin',
plugin_data,
container)
return swift_object_name
-def _get_introspection_data_from_swift(node_uuid):
- """Get introspection data from Swift.
+def _get_inspection_data_from_swift(node_uuid):
+ """Get inspection data from Swift.
:param node_uuid: UUID of the Ironic node that the data came from
:returns: dictionary with ``inventory`` and ``plugin_data`` fields
"""
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
container = CONF.inventory.swift_data_container
- inv_obj = swift_object_name + '-inventory'
- plug_obj = swift_object_name + '-plugin'
+ inv_obj = f'{_OBJECT_NAME_PREFIX}-{node_uuid}-inventory'
+ plug_obj = f'{_OBJECT_NAME_PREFIX}-{node_uuid}-plugin'
try:
inventory_data = swift_api.get_object(inv_obj, container)
except exception.SwiftOperationError:
- LOG.error("Failed to retrieve object %(obj)s from swift",
- {'obj': inv_obj})
+ LOG.error("Failed to retrieve object %(obj)s from container %(cont)s",
+ {'obj': inv_obj, 'cont': container})
raise exception.SwiftObjectNotFoundError(obj=inv_obj,
container=container,
operation='get')
try:
plugin_data = swift_api.get_object(plug_obj, container)
except exception.SwiftOperationError:
- LOG.error("Failed to retrieve object %(obj)s from swift",
- {'obj': plug_obj})
+ LOG.error("Failed to retrieve object %(obj)s from container %(cont)s",
+ {'obj': plug_obj, 'cont': container})
raise exception.SwiftObjectNotFoundError(obj=plug_obj,
container=container,
operation='get')
diff --git a/ironic/drivers/modules/inspector/interface.py b/ironic/drivers/modules/inspector/interface.py
index 8792b7b88..e72077003 100644
--- a/ironic/drivers/modules/inspector/interface.py
+++ b/ironic/drivers/modules/inspector/interface.py
@@ -299,15 +299,17 @@ def _check_status(task):
_inspection_error_handler(task, error)
elif status.is_finished:
_clean_up(task)
- store_data = CONF.inventory.data_backend
- if store_data == 'none':
- LOG.debug('Introspection data storage is disabled, the data will '
- 'not be saved for node %(node)s', {'node': node.uuid})
+ if CONF.inventory.data_backend == 'none':
+ LOG.debug('Inspection data storage is disabled, the data will '
+ 'not be saved for node %s', node.uuid)
return
introspection_data = inspector_client.get_introspection_data(
node.uuid, processed=True)
- inspect_utils.store_introspection_data(node, introspection_data,
- task.context)
+ # TODO(dtantsur): having no inventory is an abnormal state, handle it.
+ inventory = introspection_data.pop('inventory', {})
+ inspect_utils.store_inspection_data(node, inventory,
+ introspection_data,
+ task.context)
def _clean_up(task):
diff --git a/ironic/tests/base.py b/ironic/tests/base.py
index 348f15c20..69e449d3b 100644
--- a/ironic/tests/base.py
+++ b/ironic/tests/base.py
@@ -102,6 +102,11 @@ class WarningsFixture(fixtures.Fixture):
category=UserWarning,
)
+    # NOTE(gibi): The UUIDField emits a warning if the value is not a
+ # valid UUID. Let's escalate that to an exception in the test to
+ # prevent adding violations.
+ warnings.filterwarnings('error', message='.* is an invalid UUID.')
+
# Enable deprecation warnings to capture upcoming SQLAlchemy changes
warnings.filterwarnings(
@@ -125,16 +130,6 @@ class WarningsFixture(fixtures.Fixture):
category=sqla_exc.SAWarning,
)
- # ...but filter everything out until we get around to fixing them
- # TODO(stephenfin): Fix all of these
-
- warnings.filterwarnings(
- 'ignore',
- module='ironic',
- message='SELECT statement has a cartesian product ',
- category=sqla_exc.SAWarning,
- )
-
# FIXME(stephenfin): We can remove this once oslo.db is fixed
# https://review.opendev.org/c/openstack/oslo.db/+/856453
warnings.filterwarnings(
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index 6f2c1fc97..87a029057 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -7985,20 +7985,15 @@ class TestNodeInventory(test_api_base.BaseApiTest):
self.node = obj_utils.create_test_node(
self.context,
provision_state=states.AVAILABLE, name='node-81')
- self.node.save()
- self.node.obj_reset_changes()
-
- def _add_inventory(self):
- self.inventory = objects.NodeInventory(
- node_id=self.node.id, inventory_data=self.fake_inventory_data,
- plugin_data=self.fake_plugin_data)
- self.inventory.create()
+ CONF.set_override('data_backend', 'database', group='inventory')
- def test_get_old_version(self):
+ @mock.patch.object(inspect_utils, 'get_inspection_data', autospec=True)
+ def test_get_old_version(self, mock_get):
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
headers={api_base.Version.string: "1.80"},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, ret.status_code)
+ mock_get.assert_not_called()
def test_get_inventory_no_inventory(self):
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
@@ -8007,33 +8002,10 @@ class TestNodeInventory(test_api_base.BaseApiTest):
self.assertEqual(http_client.NOT_FOUND, ret.status_code)
def test_get_inventory(self):
- self._add_inventory()
- CONF.set_override('data_backend', 'database',
- group='inventory')
- ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
- headers={api_base.Version.string: self.version})
- self.assertEqual({'inventory': self.fake_inventory_data,
- 'plugin_data': self.fake_plugin_data}, ret)
-
- @mock.patch.object(inspect_utils, 'get_introspection_data',
- autospec=True)
- def test_get_inventory_exception(self, mock_get_data):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- mock_get_data.side_effect = [
- exception.NodeInventoryNotFound]
- ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
- headers={api_base.Version.string: self.version},
- expect_errors=True)
- self.assertEqual(http_client.NOT_FOUND, ret.status_int)
-
- @mock.patch.object(inspect_utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_inventory_swift(self, mock_get_data):
- CONF.set_override('data_backend', 'swift',
- group='inventory')
- mock_get_data.return_value = {"inventory": self.fake_inventory_data,
- "plugin_data": self.fake_plugin_data}
+ obj_utils.create_test_inventory(
+ self.context, self.node,
+ inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
headers={api_base.Version.string: self.version})
self.assertEqual({'inventory': self.fake_inventory_data,
diff --git a/ironic/tests/unit/common/test_image_service.py b/ironic/tests/unit/common/test_image_service.py
index 197518e1a..297a1b4b9 100644
--- a/ironic/tests/unit/common/test_image_service.py
+++ b/ironic/tests/unit/common/test_image_service.py
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import builtins
import datetime
from http import client as http_client
import io
@@ -483,119 +482,55 @@ class FileImageServiceTestCase(base.TestCase):
'properties': {},
'no_cache': True}, result)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'remove', autospec=True)
- @mock.patch.object(os, 'access', return_value=True, autospec=True)
- @mock.patch.object(os, 'stat', autospec=True)
@mock.patch.object(image_service.FileImageService, 'validate_href',
autospec=True)
- def test_download_hard_link(self, _validate_mock, stat_mock, access_mock,
- remove_mock, link_mock):
+ def test_download_hard_link(self, _validate_mock, remove_mock, link_mock,
+ copy_mock):
_validate_mock.return_value = self.href_path
- stat_mock.return_value.st_dev = 'dev1'
file_mock = mock.Mock(spec=io.BytesIO)
file_mock.name = 'file'
self.service.download(self.href, file_mock)
_validate_mock.assert_called_once_with(mock.ANY, self.href)
- self.assertEqual(2, stat_mock.call_count)
- access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
remove_mock.assert_called_once_with('file')
link_mock.assert_called_once_with(self.href_path, 'file')
+ copy_mock.assert_not_called()
- @mock.patch.object(os, 'sendfile', return_value=42, autospec=True)
- @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
- @mock.patch.object(builtins, 'open', autospec=True)
- @mock.patch.object(os, 'access', return_value=False, autospec=True)
- @mock.patch.object(os, 'stat', autospec=True)
- @mock.patch.object(image_service.FileImageService, 'validate_href',
- autospec=True)
- def test_download_copy(self, _validate_mock, stat_mock, access_mock,
- open_mock, size_mock, copy_mock):
- _validate_mock.return_value = self.href_path
- stat_mock.return_value.st_dev = 'dev1'
- file_mock = mock.MagicMock(spec=io.BytesIO)
- file_mock.name = 'file'
- input_mock = mock.MagicMock(spec=io.BytesIO)
- open_mock.return_value = input_mock
- self.service.download(self.href, file_mock)
- _validate_mock.assert_called_once_with(mock.ANY, self.href)
- self.assertEqual(2, stat_mock.call_count)
- access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
- copy_mock.assert_called_once_with(file_mock.fileno(),
- input_mock.__enter__().fileno(),
- 0, 42)
-
- @mock.patch.object(os, 'sendfile', autospec=True)
- @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
- @mock.patch.object(builtins, 'open', autospec=True)
- @mock.patch.object(os, 'access', return_value=False, autospec=True)
- @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'remove', autospec=True)
@mock.patch.object(image_service.FileImageService, 'validate_href',
autospec=True)
- def test_download_copy_segmented(self, _validate_mock, stat_mock,
- access_mock, open_mock, size_mock,
- copy_mock):
- # Fake a 3G + 1k image
- chunk_size = image_service.SENDFILE_CHUNK_SIZE
- fake_image_size = chunk_size * 3 + 1024
- fake_chunk_seq = [chunk_size, chunk_size, chunk_size, 1024]
+ def test_download_copy(self, _validate_mock, remove_mock, link_mock,
+ copy_mock):
_validate_mock.return_value = self.href_path
- stat_mock.return_value.st_dev = 'dev1'
+ link_mock.side_effect = PermissionError
file_mock = mock.MagicMock(spec=io.BytesIO)
file_mock.name = 'file'
- input_mock = mock.MagicMock(spec=io.BytesIO)
- open_mock.return_value = input_mock
- size_mock.return_value = fake_image_size
- copy_mock.side_effect = fake_chunk_seq
self.service.download(self.href, file_mock)
_validate_mock.assert_called_once_with(mock.ANY, self.href)
- self.assertEqual(2, stat_mock.call_count)
- access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
- copy_calls = [mock.call(file_mock.fileno(),
- input_mock.__enter__().fileno(),
- chunk_size * i,
- fake_chunk_seq[i]) for i in range(4)]
- copy_mock.assert_has_calls(copy_calls)
- size_mock.assert_called_once_with(self.href_path)
-
- @mock.patch.object(os, 'remove', side_effect=OSError, autospec=True)
- @mock.patch.object(os, 'access', return_value=True, autospec=True)
- @mock.patch.object(os, 'stat', autospec=True)
- @mock.patch.object(image_service.FileImageService, 'validate_href',
- autospec=True)
- def test_download_hard_link_fail(self, _validate_mock, stat_mock,
- access_mock, remove_mock):
- _validate_mock.return_value = self.href_path
- stat_mock.return_value.st_dev = 'dev1'
- file_mock = mock.MagicMock(spec=io.BytesIO)
- file_mock.name = 'file'
- self.assertRaises(exception.ImageDownloadFailed,
- self.service.download, self.href, file_mock)
- _validate_mock.assert_called_once_with(mock.ANY, self.href)
- self.assertEqual(2, stat_mock.call_count)
- access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
+ link_mock.assert_called_once_with(self.href_path, 'file')
+ copy_mock.assert_called_once_with(self.href_path, 'file')
- @mock.patch.object(os, 'sendfile', side_effect=OSError, autospec=True)
- @mock.patch.object(os.path, 'getsize', return_value=42, autospec=True)
- @mock.patch.object(builtins, 'open', autospec=True)
- @mock.patch.object(os, 'access', return_value=False, autospec=True)
- @mock.patch.object(os, 'stat', autospec=True)
+ @mock.patch.object(shutil, 'copyfile', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'remove', autospec=True)
@mock.patch.object(image_service.FileImageService, 'validate_href',
autospec=True)
- def test_download_copy_fail(self, _validate_mock, stat_mock, access_mock,
- open_mock, size_mock, copy_mock):
+ def test_download_copy_fail(self, _validate_mock, remove_mock, link_mock,
+ copy_mock):
_validate_mock.return_value = self.href_path
- stat_mock.return_value.st_dev = 'dev1'
+ link_mock.side_effect = PermissionError
+ copy_mock.side_effect = PermissionError
file_mock = mock.MagicMock(spec=io.BytesIO)
file_mock.name = 'file'
- input_mock = mock.MagicMock(spec=io.BytesIO)
- open_mock.return_value = input_mock
self.assertRaises(exception.ImageDownloadFailed,
self.service.download, self.href, file_mock)
_validate_mock.assert_called_once_with(mock.ANY, self.href)
- self.assertEqual(2, stat_mock.call_count)
- access_mock.assert_called_once_with(self.href_path, os.R_OK | os.W_OK)
- size_mock.assert_called_once_with(self.href_path)
+ link_mock.assert_called_once_with(self.href_path, 'file')
+ copy_mock.assert_called_once_with(self.href_path, 'file')
class ServiceGetterTestCase(base.TestCase):
diff --git a/ironic/tests/unit/common/test_kickstart_utils.py b/ironic/tests/unit/common/test_kickstart_utils.py
index 0dd1ac572..db6123b9d 100644
--- a/ironic/tests/unit/common/test_kickstart_utils.py
+++ b/ironic/tests/unit/common/test_kickstart_utils.py
@@ -129,4 +129,5 @@ echo $CONTENT | /usr/bin/base64 --decode > {file_path}\n\
task.node.instance_info = i_info
task.node.save()
self.assertEqual(expected, ks_utils.prepare_config_drive(task))
- mock_get.assert_called_with('http://server/fake-configdrive-url')
+ mock_get.assert_called_with('http://server/fake-configdrive-url',
+ timeout=60)
diff --git a/ironic/tests/unit/common/test_molds.py b/ironic/tests/unit/common/test_molds.py
index 810dd61bc..2323c2fa8 100644
--- a/ironic/tests/unit/common/test_molds.py
+++ b/ironic/tests/unit/common/test_molds.py
@@ -46,7 +46,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
molds.save_configuration(task, url, data)
mock_put.assert_called_once_with(url, '{\n "key": "value"\n}',
- headers={'X-Auth-Token': 'token'})
+ headers={'X-Auth-Token': 'token'},
+ timeout=60)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@mock.patch.object(requests, 'put', autospec=True)
@@ -77,7 +78,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_put.assert_called_once_with(
url, '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_noauth(self, mock_put):
@@ -91,7 +93,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
molds.save_configuration(task, url, data)
mock_put.assert_called_once_with(
url, '{\n "key": "value"\n}',
- headers=None)
+ headers=None,
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_error(self, mock_put):
@@ -112,7 +115,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
{'key': 'value'})
mock_put.assert_called_once_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_connection_error(self, mock_put):
@@ -132,7 +136,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
task, 'https://example.com/file2', {'key': 'value'})
mock_put.assert_called_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_put.call_count, 3)
@mock.patch.object(requests, 'put', autospec=True)
@@ -155,7 +160,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
{'key': 'value'})
mock_put.assert_called_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_put.call_count, 2)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@@ -176,7 +182,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
result = molds.get_configuration(task, url)
mock_get.assert_called_once_with(
- url, headers={'X-Auth-Token': 'token'})
+ url, headers={'X-Auth-Token': 'token'},
+ timeout=60)
self.assertJsonEqual({'key': 'value'}, result)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@@ -210,7 +217,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
result = molds.get_configuration(task, url)
mock_get.assert_called_once_with(
- url, headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ url, headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertJsonEqual({"key": "value"}, result)
@mock.patch.object(requests, 'get', autospec=True)
@@ -228,7 +236,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid) as task:
result = molds.get_configuration(task, url)
- mock_get.assert_called_once_with(url, headers=None)
+ mock_get.assert_called_once_with(url, headers=None, timeout=60)
self.assertJsonEqual({"key": "value"}, result)
@mock.patch.object(requests, 'get', autospec=True)
@@ -249,7 +257,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
'https://example.com/file2')
mock_get.assert_called_once_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_connection_error(self, mock_get):
@@ -269,7 +278,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
task, 'https://example.com/file2')
mock_get.assert_called_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_get.call_count, 3)
@mock.patch.object(requests, 'get', autospec=True)
@@ -291,7 +301,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
'https://example.com/file2')
mock_get.assert_called_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_get.call_count, 2)
@mock.patch.object(requests, 'get', autospec=True)
diff --git a/ironic/tests/unit/common/test_rpc_service.py b/ironic/tests/unit/common/test_rpc_service.py
index 09446ecf8..752b7665a 100644
--- a/ironic/tests/unit/common/test_rpc_service.py
+++ b/ironic/tests/unit/common/test_rpc_service.py
@@ -22,6 +22,7 @@ from oslo_utils import timeutils
from ironic.common import context
from ironic.common import rpc
from ironic.common import rpc_service
+from ironic.common import service as ironic_service
from ironic.conductor import manager
from ironic.objects import base as objects_base
from ironic.tests.unit.db import base as db_base
@@ -39,6 +40,8 @@ class TestRPCService(db_base.DbTestCase):
mgr_module = "ironic.conductor.manager"
mgr_class = "ConductorManager"
self.rpc_svc = rpc_service.RPCService(host, mgr_module, mgr_class)
+ # register oslo_service DEFAULT config options
+ ironic_service.process_launcher()
self.rpc_svc.manager.dbapi = self.dbapi
@mock.patch.object(manager.ConductorManager, 'prepare_host', autospec=True)
@@ -123,7 +126,10 @@ class TestRPCService(db_base.DbTestCase):
with mock.patch.object(self.dbapi, 'get_online_conductors',
autospec=True) as mock_cond_list:
mock_cond_list.return_value = [conductor1]
- self.rpc_svc.stop()
+ with mock.patch.object(self.dbapi, 'get_nodeinfo_list',
+ autospec=True) as mock_nodeinfo_list:
+ mock_nodeinfo_list.return_value = []
+ self.rpc_svc.stop()
# single conductor so exit immediately without waiting
mock_sleep.assert_not_called()
@@ -139,7 +145,11 @@ class TestRPCService(db_base.DbTestCase):
autospec=True) as mock_cond_list:
# multiple conductors, so wait for hash_ring_reset_interval
mock_cond_list.return_value = [conductor1, conductor2]
- self.rpc_svc.stop()
+ with mock.patch.object(self.dbapi, 'get_nodeinfo_list',
+ autospec=True) as mock_nodeinfo_list:
+ mock_nodeinfo_list.return_value = []
+ self.rpc_svc.stop()
+ mock_nodeinfo_list.assert_called_once()
# wait the total CONF.hash_ring_reset_interval 15 seconds
mock_sleep.assert_has_calls([mock.call(15)])
@@ -160,7 +170,11 @@ class TestRPCService(db_base.DbTestCase):
autospec=True) as mock_cond_list:
# multiple conductors, so wait for hash_ring_reset_interval
mock_cond_list.return_value = [conductor1, conductor2]
- self.rpc_svc.stop()
+ with mock.patch.object(self.dbapi, 'get_nodeinfo_list',
+ autospec=True) as mock_nodeinfo_list:
+ mock_nodeinfo_list.return_value = []
+ self.rpc_svc.stop()
+ mock_nodeinfo_list.assert_called_once()
# wait the remaining 10 seconds
mock_sleep.assert_has_calls([mock.call(10)])
@@ -181,7 +195,35 @@ class TestRPCService(db_base.DbTestCase):
autospec=True) as mock_cond_list:
# multiple conductors, so wait for hash_ring_reset_interval
mock_cond_list.return_value = [conductor1, conductor2]
- self.rpc_svc.stop()
+ with mock.patch.object(self.dbapi, 'get_nodeinfo_list',
+ autospec=True) as mock_nodeinfo_list:
+ mock_nodeinfo_list.return_value = []
+ self.rpc_svc.stop()
+ mock_nodeinfo_list.assert_called_once()
# no wait required, CONF.hash_ring_reset_interval already exceeded
mock_sleep.assert_not_called()
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_stop_has_reserved(self, mock_sleep, mock_utcnow):
+ mock_utcnow.return_value = datetime.datetime(2023, 2, 2, 21, 10, 0)
+ conductor1 = db_utils.get_test_conductor(hostname='fake_host')
+ conductor2 = db_utils.get_test_conductor(hostname='other_fake_host')
+
+ with mock.patch.object(self.dbapi, 'get_online_conductors',
+ autospec=True) as mock_cond_list:
+ # multiple conductors, so wait for hash_ring_reset_interval
+ mock_cond_list.return_value = [conductor1, conductor2]
+ with mock.patch.object(self.dbapi, 'get_nodeinfo_list',
+ autospec=True) as mock_nodeinfo_list:
+ # 3 calls to manager has_reserved until all reservation locks
+ # are released
+ mock_nodeinfo_list.side_effect = [['a', 'b'], ['a'], []]
+ self.rpc_svc.stop()
+ self.assertEqual(3, mock_nodeinfo_list.call_count)
+
+ # wait the remaining 15 seconds, then wait until has_reserved
+ # returns False
+ mock_sleep.assert_has_calls(
+ [mock.call(15), mock.call(1), mock.call(1)])
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
index 34e805deb..cdfbf14ee 100644
--- a/ironic/tests/unit/conductor/test_cleaning.py
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -436,6 +436,36 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.assertFalse(node.maintenance)
self.assertIsNone(node.fault)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.set_power_state',
+ autospec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
+ @mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
+ autospec=True)
+ def test_do_node_clean_steps_fail_poweroff(self, mock_steps, mock_validate,
+ mock_power, clean_steps=None,
+ invalid_exc=True):
+ if invalid_exc:
+ mock_steps.side_effect = exception.InvalidParameterValue('invalid')
+ else:
+ mock_steps.side_effect = exception.NodeCleaningFailure('failure')
+ tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
+ self.config(poweroff_in_cleanfail=True, group='conductor')
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ uuid=uuidutils.generate_uuid(),
+ provision_state=states.CLEANING,
+ power_state=states.POWER_ON,
+ target_provision_state=tgt_prov_state)
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_node_clean(task, clean_steps=clean_steps)
+ mock_validate.assert_called_once_with(mock.ANY, task)
+ node.refresh()
+ self.assertEqual(states.CLEANFAIL, node.provision_state)
+ self.assertEqual(tgt_prov_state, node.target_provision_state)
+ mock_steps.assert_called_once_with(mock.ANY, disable_ramdisk=False)
+ self.assertTrue(mock_power.called)
+
def test__do_node_clean_automated_steps_fail(self):
for invalid in (True, False):
self.__do_node_clean_steps_fail(invalid_exc=invalid)
diff --git a/ironic/tests/unit/conductor/test_inspection.py b/ironic/tests/unit/conductor/test_inspection.py
new file mode 100644
index 000000000..c64b883e4
--- /dev/null
+++ b/ironic/tests/unit/conductor/test_inspection.py
@@ -0,0 +1,118 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import inspection
+from ironic.conductor import task_manager
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as obj_utils
+
+
+@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
+class TestInspectHardware(db_base.DbTestCase):
+
+ def test_inspect_hardware_ok(self, mock_inspect):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ driver_internal_info={'agent_url': 'url',
+ 'agent_secret_token': 'token'})
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.MANAGEABLE
+ inspection.inspect_hardware(task)
+ node.refresh()
+ self.assertEqual(states.MANAGEABLE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+ task.node.refresh()
+ self.assertNotIn('agent_url', task.node.driver_internal_info)
+ self.assertNotIn('agent_secret_token', task.node.driver_internal_info)
+
+ def test_inspect_hardware_return_inspecting(self, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.INSPECTING
+ self.assertRaises(exception.HardwareInspectionFailure,
+ inspection.inspect_hardware, task)
+
+ node.refresh()
+ self.assertIn('driver returned unexpected state', node.last_error)
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+
+ def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.INSPECTWAIT
+ inspection.inspect_hardware(task)
+ node.refresh()
+ self.assertEqual(states.INSPECTWAIT, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+
+ @mock.patch.object(inspection, 'LOG', autospec=True)
+ def test_inspect_hardware_return_other_state(self, log_mock, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = None
+ self.assertRaises(exception.HardwareInspectionFailure,
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+ self.assertTrue(log_mock.error.called)
+
+ def test_inspect_hardware_raises_error(self, mock_inspect):
+ mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
+ state = states.MANAGEABLE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ target_provision_state=state)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaisesRegex(exception.HardwareInspectionFailure, '^test$',
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertEqual('test', node.last_error)
+ self.assertTrue(mock_inspect.called)
+
+ def test_inspect_hardware_unexpected_error(self, mock_inspect):
+ mock_inspect.side_effect = RuntimeError('x')
+ state = states.MANAGEABLE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ target_provision_state=state)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaisesRegex(exception.HardwareInspectionFailure,
+ 'Unexpected exception of type RuntimeError: x',
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertEqual('Unexpected exception of type RuntimeError: x',
+ node.last_error)
+ self.assertTrue(mock_inspect.called)
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 6a6f7e08f..5244e4c4b 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -46,6 +46,7 @@ from ironic.common import nova
from ironic.common import states
from ironic.conductor import cleaning
from ironic.conductor import deployments
+from ironic.conductor import inspection
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import steps as conductor_steps
@@ -5088,7 +5089,8 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
self.power = self.driver.power
self.node = obj_utils.create_test_node(
self.context, driver='fake-hardware', maintenance=False,
- provision_state=states.AVAILABLE, instance_uuid=uuidutils.uuid)
+ provision_state=states.AVAILABLE,
+ instance_uuid=uuidutils.generate_uuid())
self.task = mock.Mock(spec_set=['context', 'driver', 'node',
'upgrade_lock', 'shared'])
self.task.context = self.context
@@ -6461,77 +6463,6 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_ok(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- driver_internal_info={'agent_url': 'url',
- 'agent_secret_token': 'token'})
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.MANAGEABLE
- manager._do_inspect_hardware(task)
- node.refresh()
- self.assertEqual(states.MANAGEABLE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
- task.node.refresh()
- self.assertNotIn('agent_url', task.node.driver_internal_info)
- self.assertNotIn('agent_secret_token', task.node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_inspecting(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.INSPECTING
- self.assertRaises(exception.HardwareInspectionFailure,
- manager._do_inspect_hardware, task)
-
- node.refresh()
- self.assertIn('driver returned unexpected state', node.last_error)
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.INSPECTWAIT
- manager._do_inspect_hardware(task)
- node.refresh()
- self.assertEqual(states.INSPECTWAIT, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_other_state(self, mock_inspect, log_mock):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = None
- self.assertRaises(exception.HardwareInspectionFailure,
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
- self.assertTrue(log_mock.error.called)
-
def test__check_inspect_wait_timeouts(self):
self._start_service()
CONF.set_override('inspect_wait_timeout', 1, group='conductor')
@@ -6609,46 +6540,6 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_inspect_hardware_power_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_raises_error(self, mock_inspect):
- self._start_service()
- mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
- state = states.MANAGEABLE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- target_provision_state=state)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaisesRegex(exception.HardwareInspectionFailure, '^test$',
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertEqual('test', node.last_error)
- self.assertTrue(mock_inspect.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_unexpected_error(self, mock_inspect):
- self._start_service()
- mock_inspect.side_effect = RuntimeError('x')
- state = states.MANAGEABLE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- target_provision_state=state)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaisesRegex(exception.HardwareInspectionFailure,
- 'Unexpected exception of type RuntimeError: x',
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertEqual('Unexpected exception of type RuntimeError: x',
- node.last_error)
- self.assertTrue(mock_inspect.called)
-
@mock.patch.object(conductor_utils, 'node_history_record',
mock.Mock(spec=conductor_utils.node_history_record))
@@ -8247,7 +8138,7 @@ class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
- @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch.object(inspection, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_not_support(self, mock_acquire,
@@ -8268,7 +8159,7 @@ class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
exc.exc_info[0])
self.assertTrue(mock_log.error.called)
- @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch.object(inspection, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_return_failed(self, mock_acquire,
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index 52fc72436..27c4bfa86 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -243,11 +243,12 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.config(host='my-host')
# Required for exception handling
mock_notif.__name__ = 'NodeSetPowerStateNotification'
- node = obj_utils.create_test_node(self.context,
- uuid=uuidutils.generate_uuid(),
- driver='fake-hardware',
- instance_uuid=uuidutils.uuid,
- power_state=states.POWER_OFF)
+ node = obj_utils.create_test_node(
+ self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ instance_uuid=uuidutils.generate_uuid(),
+ power_state=states.POWER_OFF)
task = task_manager.TaskManager(self.context, node.uuid)
get_power_mock.return_value = states.POWER_OFF
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
index 8aa6f78da..8ebaa14fa 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
@@ -1132,6 +1132,45 @@ class IloPXEBootTestCase(test_common.BaseIloTest):
self.assertIsNone(task.node.driver_internal_info.get(
'ilo_uefi_iscsi_boot'))
+ @mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
+ @mock.patch.object(deploy_utils, 'is_iscsi_boot',
+ spec_set=True, autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_instance', spec_set=True,
+ autospec=True)
+ def _test_prepare_instance_anaconda(self, pxe_prepare_instance_mock,
+ update_boot_mode_mock,
+ get_boot_mode_mock,
+ is_iscsi_boot_mock,
+ mock_get_boot_opt,
+ mock_prep_node_fr_deploy, prov_state):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = prov_state
+ mock_get_boot_opt.return_value = 'kickstart'
+ is_iscsi_boot_mock.return_value = False
+ get_boot_mode_mock.return_value = 'uefi'
+ task.driver.boot.prepare_instance(task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
+ self.assertIsNone(task.node.driver_internal_info.get(
+ 'ilo_uefi_iscsi_boot'))
+ mock_prep_node_fr_deploy.assert_called_once_with(task)
+
+ def test_prepare_instance_anaconda_deploying(self):
+ self._test_prepare_instance_anaconda(prov_state=states.DEPLOYING)
+
+ def test_prepare_instance_anaconda_rescuing(self):
+ self._test_prepare_instance_anaconda(prov_state=states.RESCUING)
+
+ def test_prepare_instance_anaconda_cleaning(self):
+ self._test_prepare_instance_anaconda(prov_state=states.CLEANING)
+
@mock.patch.object(deploy_utils, 'is_iscsi_boot',
spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
@@ -1299,6 +1338,45 @@ class IloiPXEBootTestCase(test_common.BaseIloTest):
self.assertIsNone(task.node.driver_internal_info.get(
'ilo_uefi_iscsi_boot'))
+ @mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
+ @mock.patch.object(deploy_utils, 'is_iscsi_boot',
+ spec_set=True, autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ipxe.iPXEBoot, 'prepare_instance', spec_set=True,
+ autospec=True)
+ def _test_prepare_instance_anaconda(self, pxe_prepare_instance_mock,
+ update_boot_mode_mock,
+ get_boot_mode_mock,
+ is_iscsi_boot_mock,
+ mock_get_boot_opt,
+ mock_prep_node_fr_deploy, prov_state):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = prov_state
+ mock_get_boot_opt.return_value = 'kickstart'
+ is_iscsi_boot_mock.return_value = False
+ get_boot_mode_mock.return_value = 'uefi'
+ task.driver.boot.prepare_instance(task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
+ self.assertIsNone(task.node.driver_internal_info.get(
+ 'ilo_uefi_iscsi_boot'))
+ mock_prep_node_fr_deploy.assert_called_once_with(task)
+
+ def test_prepare_instance_anaconda_deploying(self):
+ self._test_prepare_instance_anaconda(prov_state=states.DEPLOYING)
+
+ def test_prepare_instance_anaconda_rescuing(self):
+ self._test_prepare_instance_anaconda(prov_state=states.RESCUING)
+
+ def test_prepare_instance_anaconda_cleaning(self):
+ self._test_prepare_instance_anaconda(prov_state=states.CLEANING)
+
@mock.patch.object(deploy_utils, 'is_iscsi_boot',
spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
diff --git a/ironic/tests/unit/drivers/modules/inspector/test_interface.py b/ironic/tests/unit/drivers/modules/inspector/test_interface.py
index f4dbdd4b0..42bb55f2b 100644
--- a/ironic/tests/unit/drivers/modules/inspector/test_interface.py
+++ b/ironic/tests/unit/drivers/modules/inspector/test_interface.py
@@ -505,22 +505,24 @@ class CheckStatusTestCase(BaseTestCase):
self.task)
self.driver.boot.clean_up_ramdisk.assert_called_once_with(self.task)
- @mock.patch.object(inspect_utils, 'store_introspection_data',
- autospec=True)
+ @mock.patch.object(inspect_utils, 'store_inspection_data', autospec=True)
def test_status_ok_store_inventory(self, mock_store_data, mock_client):
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error=None,
spec=['is_finished', 'error'])
- fake_introspection_data = {
- "inventory": {"cpu": "amd"}, "disks": [{"name": "/dev/vda"}]}
+ fake_inventory = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+ fake_introspection_data = dict(fake_plugin_data,
+ inventory=fake_inventory)
mock_get_data = mock_client.return_value.get_introspection_data
mock_get_data.return_value = fake_introspection_data
inspector._check_status(self.task)
mock_get.assert_called_once_with(self.node.uuid)
mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
mock_store_data.assert_called_once_with(self.node,
- fake_introspection_data,
+ fake_inventory,
+ fake_plugin_data,
self.task.context)
def test_status_ok_store_inventory_nostore(self, mock_client):
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index 473b0ee7c..57980d5a2 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -103,7 +103,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -117,7 +117,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_404_exception(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -132,7 +132,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_fail_exception(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -148,7 +148,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_fail_exceptions(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -171,115 +171,93 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
super(IntrospectionDataStorageFunctionsTestCase, self).setUp()
self.node = obj_utils.create_test_node(self.context)
- def test_store_introspection_data_db(self):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- fake_introspection_data = {'inventory': self.fake_inventory_data,
- **self.fake_plugin_data}
+ def test_store_inspection_data_db(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
fake_context = ironic_context.RequestContext()
- utils.store_introspection_data(self.node, fake_introspection_data,
- fake_context)
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
stored = objects.NodeInventory.get_by_node_id(self.context,
self.node.id)
self.assertEqual(self.fake_inventory_data, stored["inventory_data"])
self.assertEqual(self.fake_plugin_data, stored["plugin_data"])
- @mock.patch.object(utils, '_store_introspection_data_in_swift',
+ @mock.patch.object(utils, '_store_inspection_data_in_swift',
autospec=True)
- def test_store_introspection_data_swift(self, mock_store_data):
+ def test_store_inspection_data_swift(self, mock_store_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
- fake_introspection_data = {
- "inventory": self.fake_inventory_data, **self.fake_plugin_data}
fake_context = ironic_context.RequestContext()
- utils.store_introspection_data(self.node, fake_introspection_data,
- fake_context)
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
mock_store_data.assert_called_once_with(
self.node.uuid, inventory_data=self.fake_inventory_data,
plugin_data=self.fake_plugin_data)
- def test_store_introspection_data_nostore(self):
+ def test_store_inspection_data_nostore(self):
CONF.set_override('data_backend', 'none', group='inventory')
- fake_introspection_data = {
- "inventory": self.fake_inventory_data, **self.fake_plugin_data}
fake_context = ironic_context.RequestContext()
- ret = utils.store_introspection_data(self.node,
- fake_introspection_data,
- fake_context)
- self.assertIsNone(ret)
-
- def test__node_inventory_convert(self):
- required_output = {"inventory": self.fake_inventory_data,
- "plugin_data": self.fake_plugin_data}
- input_given = {}
- input_given["inventory_data"] = self.fake_inventory_data
- input_given["plugin_data"] = self.fake_plugin_data
- input_given["booom"] = "boom"
- ret = utils._node_inventory_convert(input_given)
- self.assertEqual(required_output, ret)
-
- @mock.patch.object(utils, '_node_inventory_convert', autospec=True)
- @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
- def test_get_introspection_data_db(self, mock_inventory, mock_convert):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- fake_introspection_data = {'inventory': self.fake_inventory_data,
- 'plugin_data': self.fake_plugin_data}
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
+ self.assertRaises(exception.NodeInventoryNotFound,
+ objects.NodeInventory.get_by_node_id,
+ self.context, self.node.id)
+
+ def test_get_inspection_data_db(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
+ obj_utils.create_test_inventory(
+ self.context, self.node,
+ inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
fake_context = ironic_context.RequestContext()
- mock_inventory.get_by_node_id.return_value = fake_introspection_data
- utils.get_introspection_data(self.node, fake_context)
- mock_convert.assert_called_once_with(fake_introspection_data)
-
- @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
- def test_get_introspection_data_db_exception(self, mock_inventory):
- CONF.set_override('data_backend', 'database',
- group='inventory')
+ ret = utils.get_inspection_data(self.node, fake_context)
+ fake_inspection_data = {'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}
+ self.assertEqual(ret, fake_inspection_data)
+
+ def test_get_inspection_data_db_exception(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
fake_context = ironic_context.RequestContext()
- mock_inventory.get_by_node_id.side_effect = [
- exception.NodeInventoryNotFound(self.node.uuid)]
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
- @mock.patch.object(utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_introspection_data_swift(self, mock_get_data):
+ @mock.patch.object(utils, '_get_inspection_data_from_swift', autospec=True)
+ def test_get_inspection_data_swift(self, mock_get_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
fake_context = ironic_context.RequestContext()
- utils.get_introspection_data(self.node, fake_context)
- mock_get_data.assert_called_once_with(
- self.node.uuid)
+ ret = utils.get_inspection_data(self.node, fake_context)
+ mock_get_data.assert_called_once_with(self.node.uuid)
+ self.assertEqual(mock_get_data.return_value, ret)
- @mock.patch.object(utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_introspection_data_swift_exception(self, mock_get_data):
+ @mock.patch.object(utils, '_get_inspection_data_from_swift', autospec=True)
+ def test_get_inspection_data_swift_exception(self, mock_get_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
fake_context = ironic_context.RequestContext()
mock_get_data.side_effect = exception.SwiftObjectNotFoundError()
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
- def test_get_introspection_data_nostore(self):
+ def test_get_inspection_data_nostore(self):
CONF.set_override('data_backend', 'none', group='inventory')
fake_context = ironic_context.RequestContext()
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__store_introspection_data_in_swift(self, swift_api_mock):
- container = 'introspection_data'
+ def test__store_inspection_data_in_swift(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
- utils._store_introspection_data_in_swift(
+ utils._store_inspection_data_in_swift(
self.node.uuid, self.fake_inventory_data, self.fake_plugin_data)
swift_obj_mock = swift_api_mock.return_value
object_name = 'inspector_data-' + str(self.node.uuid)
@@ -290,23 +268,22 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
container)])
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__get_introspection_data_from_swift(self, swift_api_mock):
- container = 'introspection_data'
+ def test__get_inspection_data_from_swift(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.get_object.side_effect = [
self.fake_inventory_data,
self.fake_plugin_data
]
- ret = utils._get_introspection_data_from_swift(self.node.uuid)
+ ret = utils._get_inspection_data_from_swift(self.node.uuid)
req_ret = {"inventory": self.fake_inventory_data,
"plugin_data": self.fake_plugin_data}
self.assertEqual(req_ret, ret)
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__get_introspection_data_from_swift_exception(self,
- swift_api_mock):
- container = 'introspection_data'
+ def test__get_inspection_data_from_swift_exception(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.get_object.side_effect = [
@@ -314,5 +291,5 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
self.fake_plugin_data
]
self.assertRaises(exception.SwiftObjectNotFoundError,
- utils._get_introspection_data_from_swift,
+ utils._get_inspection_data_from_swift,
self.node.uuid)
diff --git a/ironic/tests/unit/objects/utils.py b/ironic/tests/unit/objects/utils.py
index 26c3a22e7..979ab28a2 100644
--- a/ironic/tests/unit/objects/utils.py
+++ b/ironic/tests/unit/objects/utils.py
@@ -380,3 +380,15 @@ class SchemasTestMixIn(object):
"for %s, schema key %s has invalid %s "
"field %s" % (payload, schema_key, resource,
key))
+
+
+def create_test_inventory(ctxt, node, **kw):
+ """Create and return a test node inventory object."""
+ inv = objects.NodeInventory(ctxt)
+ if not isinstance(node, str):
+ node = node.id
+ kw['node_id'] = node
+ for key, value in kw.items():
+ setattr(inv, key, value)
+ inv.create()
+ return inv
diff --git a/playbooks/metal3-ci/run.yaml b/playbooks/metal3-ci/run.yaml
index 66886b26a..fdee0ffde 100644
--- a/playbooks/metal3-ci/run.yaml
+++ b/playbooks/metal3-ci/run.yaml
@@ -5,24 +5,24 @@
set_fact:
metal3_dev_env_src_dir: '{{ ansible_user_dir }}/metal3-dev-env'
metal3_environment:
+ ANSIBLE_VERBOSITY: 2
CONTROL_PLANE_MACHINE_COUNT: 1
IMAGE_OS: ubuntu
IMAGE_USERNAME: zuul
+ IRONIC_FROM_SOURCE: "true"
+ IRONIC_SOURCE: "/home/zuul/src/opendev.org/openstack/ironic"
# NOTE(dtantsur): we don't have enough resources to provision even
# a 2-node cluster, so only provision a control plane node.
NUM_NODES: 2
+ LIBVIRT_DOMAIN_TYPE: "qemu"
WORKER_MACHINE_COUNT: 1
# TODO(dtantsur): add metal3-io/metal3-dev-env as a recognized project to
# https://opendev.org/openstack/project-config/src/commit/e15b9cae77bdc243322cee64b3688a2a43dd193c/zuul/main.yaml#L1416
- # TODO(dtantsur): replace my fork with the upstream source once all fixes
- # merge there.
- # TODO(rpittau): move back to dtantsur or metal3-io after we merge the changes
- name: Clone metal3-dev-env
git:
dest: "{{ metal3_dev_env_src_dir }}"
- repo: "https://github.com/elfosardo/metal3-dev-env"
- version: ironic-ci
+ repo: "https://github.com/metal3-io/metal3-dev-env"
- name: Build a metal3 environment
command: make
diff --git a/releasenotes/notes/Cleanfail-power-off-13b5fdcc2727866a.yaml b/releasenotes/notes/Cleanfail-power-off-13b5fdcc2727866a.yaml
new file mode 100644
index 000000000..2856bac06
--- /dev/null
+++ b/releasenotes/notes/Cleanfail-power-off-13b5fdcc2727866a.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+    Add a new conductor configuration option,
+    ``[conductor]poweroff_in_cleanfail`` (default: ``False``). When ``True``,
+    nodes entering the ``clean failed`` state will be powered off. This
+    option may be unsafe when cleaning performs hardware-transformative
+    actions such as firmware upgrades.
diff --git a/releasenotes/notes/cross-link-1ffd1a4958f14fd7.yaml b/releasenotes/notes/cross-link-1ffd1a4958f14fd7.yaml
new file mode 100644
index 000000000..caac13dd4
--- /dev/null
+++ b/releasenotes/notes/cross-link-1ffd1a4958f14fd7.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes ``Invalid cross-device link`` in some cases when using ``file://``
+ image URLs.
diff --git a/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml b/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml
new file mode 100644
index 000000000..c3096be79
--- /dev/null
+++ b/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Enables boot mode switching during anaconda deploy for ``ilo``
+ and ``ilo5`` hardware types.
diff --git a/releasenotes/notes/graceful_shutdown_wait-9a62627714b86726.yaml b/releasenotes/notes/graceful_shutdown_wait-9a62627714b86726.yaml
new file mode 100644
index 000000000..778b7dc6f
--- /dev/null
+++ b/releasenotes/notes/graceful_shutdown_wait-9a62627714b86726.yaml
@@ -0,0 +1,15 @@
+---
+features:
+ - |
+ On shutdown the conductor will wait for at most
+ ``[DEFAULT]graceful_shutdown_timeout`` seconds for existing lock node
+ reservations to clear. Previously lock reservations were cleared
+ immediately, which in some cases would result in nodes going into a failed
+ state.
+upgrade:
+ - |
+ ``[DEFAULT]graceful_shutdown_timeout`` defaults to 60s. Systemd
+ ``TimeoutStopSec`` defaults to 30s. Kubernetes
+ ``terminationGracePeriodSeconds`` defaults to 90s. It is recommended to
+ align the value of ``[DEFAULT]graceful_shutdown_timeout`` with the graceful
+ timeout of the process manager of the conductor process. \ No newline at end of file
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 16974adb5..924f0c824 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -83,5 +83,5 @@ EOF
chmod 0600 $HOME/.pgpass
# Now create our database
-psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest"
+psql -h 127.0.0.1 -U $DB_USER -d postgres -c "DROP DATABASE IF EXISTS openstack_citest"
createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest
diff --git a/tox.ini b/tox.ini
index 1792d81c9..1fa446d66 100644
--- a/tox.ini
+++ b/tox.ini
@@ -132,7 +132,7 @@ commands = {posargs}
# [W503] Line break before binary operator.
ignore = E129,E741,W503
filename = *.py,app.wsgi
-exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
+exclude=.*,dist,doc,*lib/python*,*egg,build
import-order-style = pep8
application-import-names = ironic
max-complexity=19
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index ca1757417..d2e91875f 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -896,6 +896,7 @@
SWIFT_TEMPURL_KEY: secretkey
EBTABLES_RACE_FIX: True
LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
+ INSTANCE_WAIT: 120
old:
IRONIC_VM_LOG_DIR: '{{ devstack_bases.old }}/ironic-bm-logs'
grenade_localrc: