-rw-r--r--  api-ref/source/baremetal-api-v1-nodes-inventory.inc  8
-rw-r--r--  api-ref/source/index.rst  1
-rw-r--r--  doc/source/admin/drivers/fake.rst  24
-rw-r--r--  doc/source/admin/drivers/ibmc.rst  2
-rw-r--r--  doc/source/admin/fast-track.rst  13
-rw-r--r--  doc/source/admin/interfaces/deploy.rst  40
-rw-r--r--  doc/source/admin/metrics.rst  34
-rw-r--r--  doc/source/install/enrollment.rst  26
-rw-r--r--  doc/source/install/refarch/common.rst  5
-rw-r--r--  ironic/api/controllers/v1/__init__.py  146
-rw-r--r--  ironic/api/controllers/v1/node.py  13
-rw-r--r--  ironic/api/controllers/v1/ramdisk.py  6
-rw-r--r--  ironic/common/kickstart_utils.py  4
-rw-r--r--  ironic/common/molds.py  6
-rw-r--r--  ironic/common/release_mappings.py  68
-rw-r--r--  ironic/common/states.py  3
-rw-r--r--  ironic/common/utils.py  28
-rw-r--r--  ironic/conductor/cleaning.py  26
-rw-r--r--  ironic/conductor/inspection.py  108
-rw-r--r--  ironic/conductor/manager.py  166
-rw-r--r--  ironic/conductor/periodics.py  17
-rw-r--r--  ironic/conductor/task_manager.py  11
-rw-r--r--  ironic/conductor/utils.py  19
-rw-r--r--  ironic/conf/__init__.py  4
-rw-r--r--  ironic/conf/conductor.py  35
-rw-r--r--  ironic/conf/default.py  5
-rw-r--r--  ironic/conf/fake.py  85
-rw-r--r--  ironic/conf/inventory.py  15
-rw-r--r--  ironic/conf/opts.py  1
-rw-r--r--  ironic/conf/sensor_data.py  89
-rw-r--r--  ironic/db/sqlalchemy/api.py  31
-rw-r--r--  ironic/drivers/modules/ansible/playbooks/library/stream_url.py  3
-rw-r--r--  ironic/drivers/modules/deploy_utils.py  37
-rw-r--r--  ironic/drivers/modules/fake.py  63
-rw-r--r--  ironic/drivers/modules/ilo/boot.py  12
-rw-r--r--  ironic/drivers/modules/inspect_utils.py  129
-rw-r--r--  ironic/drivers/modules/inspector/__init__.py  15
-rw-r--r--  ironic/drivers/modules/inspector/client.py  57
-rw-r--r--  ironic/drivers/modules/inspector/interface.py (renamed from ironic/drivers/modules/inspector.py)  105
-rw-r--r--  ironic/drivers/modules/pxe.py  15
-rw-r--r--  ironic/drivers/modules/pxe_base.py  7
-rw-r--r--  ironic/drivers/modules/snmp.py  4
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_node.py  105
-rw-r--r--  ironic/tests/unit/common/test_kickstart_utils.py  3
-rw-r--r--  ironic/tests/unit/common/test_molds.py  35
-rw-r--r--  ironic/tests/unit/common/test_release_mappings.py  2
-rw-r--r--  ironic/tests/unit/conductor/test_cleaning.py  21
-rw-r--r--  ironic/tests/unit/conductor/test_inspection.py  118
-rw-r--r--  ironic/tests/unit/conductor/test_manager.py  231
-rw-r--r--  ironic/tests/unit/conductor/test_utils.py  55
-rw-r--r--  ironic/tests/unit/db/test_api.py  29
-rw-r--r--  ironic/tests/unit/drivers/modules/ilo/test_boot.py  78
-rw-r--r--  ironic/tests/unit/drivers/modules/inspector/__init__.py  0
-rw-r--r--  ironic/tests/unit/drivers/modules/inspector/test_client.py  65
-rw-r--r--  ironic/tests/unit/drivers/modules/inspector/test_interface.py (renamed from ironic/tests/unit/drivers/modules/test_inspector.py)  70
-rw-r--r--  ironic/tests/unit/drivers/modules/test_deploy_utils.py  73
-rw-r--r--  ironic/tests/unit/drivers/modules/test_inspect_utils.py  136
-rw-r--r--  ironic/tests/unit/drivers/modules/test_pxe.py  10
-rw-r--r--  ironic/tests/unit/drivers/modules/test_snmp.py  35
-rw-r--r--  ironic/tests/unit/drivers/test_fake_hardware.py  29
-rw-r--r--  ironic/tests/unit/objects/utils.py  12
-rw-r--r--  playbooks/metal3-ci/fetch_kube_logs.yaml  32
-rw-r--r--  playbooks/metal3-ci/fetch_pod_logs.yaml  24
-rw-r--r--  playbooks/metal3-ci/post.yaml  194
-rw-r--r--  playbooks/metal3-ci/run.yaml  37
-rw-r--r--  releasenotes/notes/bug-2010613-3ab1f32aaa776f28.yaml  7
-rw-r--r--  releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml  8
-rw-r--r--  releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml  39
-rw-r--r--  releasenotes/notes/fakedelay-7eac23ad8881a736.yaml  8
-rw-r--r--  releasenotes/notes/fix-online-version-migration-db432a7b239647fa.yaml  14
-rw-r--r--  releasenotes/notes/fix-power-off-token-wipe-e7d605997f00d39d.yaml  6
-rw-r--r--  releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml  5
-rw-r--r--  releasenotes/notes/fix_secure_boot_with_anaconda_deploy-84d7c1e3bbfa40f2.yaml  4
-rw-r--r--  releasenotes/notes/ironic-antelope-prelude-0b77964469f56b13.yaml  14
-rw-r--r--  releasenotes/notes/no-recalculate-653e524fd6160e72.yaml  5
-rw-r--r--  releasenotes/notes/wipe-agent-token-upon-cleaning-timeout-c9add514fad1b02c.yaml  7
-rw-r--r--  releasenotes/source/2023.1.rst  6
-rw-r--r--  releasenotes/source/index.rst  1
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po  11
-rw-r--r--  releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po  159
-rw-r--r--  requirements.txt  2
-rw-r--r--  tools/benchmark/generate-statistics.py  2
-rw-r--r--  tox.ini  2
-rw-r--r--  zuul.d/metal3-jobs.yaml  30
-rw-r--r--  zuul.d/project.yaml  2
85 files changed, 2368 insertions, 844 deletions
diff --git a/api-ref/source/baremetal-api-v1-nodes-inventory.inc b/api-ref/source/baremetal-api-v1-nodes-inventory.inc
index 4c36e5aa2..ed3fb9a81 100644
--- a/api-ref/source/baremetal-api-v1-nodes-inventory.inc
+++ b/api-ref/source/baremetal-api-v1-nodes-inventory.inc
@@ -9,8 +9,12 @@ Node inventory
Given a Node identifier, the API provides access to the introspection data
associated to the Node via ``v1/nodes/{node_ident}/inventory`` endpoint.
-Fetch node inventory
-===============================
+The inventory format comes from ironic-python-agent and is currently documented
+in the `agent inventory documentation
+<https://docs.openstack.org/ironic-python-agent/latest/admin/how_it_works.html#hardware-inventory>`_.
+
+Show Node Inventory
+===================
.. rest_method:: GET /v1/nodes/{node_ident}/inventory
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
index bb41ba6fd..12beed40e 100644
--- a/api-ref/source/index.rst
+++ b/api-ref/source/index.rst
@@ -28,6 +28,7 @@
.. include:: baremetal-api-v1-node-allocation.inc
.. include:: baremetal-api-v1-deploy-templates.inc
.. include:: baremetal-api-v1-nodes-history.inc
+.. include:: baremetal-api-v1-nodes-inventory.inc
.. include:: baremetal-api-v1-shards.inc
.. NOTE(dtantsur): keep chassis close to the end since it's semi-deprecated
.. include:: baremetal-api-v1-chassis.inc
diff --git a/doc/source/admin/drivers/fake.rst b/doc/source/admin/drivers/fake.rst
index ea7d7ef4c..2e2cc355e 100644
--- a/doc/source/admin/drivers/fake.rst
+++ b/doc/source/admin/drivers/fake.rst
@@ -23,6 +23,30 @@ Development
Developers can use ``fake-hardware`` hardware-type to mock out nodes for
testing without those nodes needing to exist with physical or virtual hardware.
+Scale testing
+-------------
+The ``fake`` drivers have a configurable delay in seconds which will result
+in the corresponding operations taking that long to complete. Two
+comma-delimited values will result in a delay with a triangular random
+distribution, weighted on the first value. These delays are applied to
+operations which typically block in other drivers. This allows more
+realistic scenarios to be arranged for performance and functional testing of
+an Ironic service without requiring real bare metal or faking at the BMC
+protocol level.
+
+.. code-block:: ini
+
+ [fake]
+ power_delay = 5
+ boot_delay = 10
+ deploy_delay = 60,360
+ vendor_delay = 1
+ management_delay = 5
+ inspect_delay = 360,480
+ raid_delay = 10
+ bios_delay = 5
+ storage_delay = 10
+ rescue_delay = 120
+
Adoption
--------
Some OpenStack deployers have used ``fake`` interfaces in Ironic to allow an
diff --git a/doc/source/admin/drivers/ibmc.rst b/doc/source/admin/drivers/ibmc.rst
index 1bf9a3ba2..0f7fe1d90 100644
--- a/doc/source/admin/drivers/ibmc.rst
+++ b/doc/source/admin/drivers/ibmc.rst
@@ -312,6 +312,6 @@ boot_up_seq GET Query boot up sequence
get_raid_controller_list GET Query RAID controller summary info
======================== ============ ======================================
-.. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc
+.. _Huawei iBMC: https://e.huawei.com/en/products/computing/kunpeng/accessories/ibmc
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
.. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/
diff --git a/doc/source/admin/fast-track.rst b/doc/source/admin/fast-track.rst
index 20ca6199f..e42942818 100644
--- a/doc/source/admin/fast-track.rst
+++ b/doc/source/admin/fast-track.rst
@@ -15,6 +15,19 @@ provisioned within a short period of time.
the ``noop`` networking. The case where inspection, cleaning and
provisioning networks are different is not supported.
+.. note::
+ Fast track mode is very sensitive to long-running processes on the conductor
+ side that may prevent agent heartbeats from being registered.
+
+ For example, converting a large image to the raw format may take long enough
+ to reach the fast track timeout. In this case, you can either :ref:`use raw
+ images <stream_raw_images>` or move the conversion to the agent side with:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ force_raw_images = False
+
Enabling
========
diff --git a/doc/source/admin/interfaces/deploy.rst b/doc/source/admin/interfaces/deploy.rst
index 7db5a24ff..79d004ad0 100644
--- a/doc/source/admin/interfaces/deploy.rst
+++ b/doc/source/admin/interfaces/deploy.rst
@@ -81,6 +81,46 @@ accessible from HTTP service. Please refer to configuration option
``FollowSymLinks`` if you are using Apache HTTP server, or
``disable_symlinks`` if Nginx HTTP server is in use.
+.. _stream_raw_images:
+
+Streaming raw images
+--------------------
+
+The Bare Metal service is capable of streaming raw images directly to the
+target disk of a node, without caching them in the node's RAM. When the source
+image is not already raw, the conductor will convert the image and calculate
+the new checksum.
+
+.. note::
+ If no algorithm is specified via the ``image_os_hash_algo`` field, or if
+ this field is set to ``md5``, SHA256 is used for the updated checksum.
+
+For HTTP or local file images that are already raw, you need to explicitly set
+the disk format to prevent the checksum from being unnecessarily re-calculated.
+For example:
+
+.. code-block:: shell
+
+ baremetal node set <node> \
+ --instance-info image_source=http://server/myimage.img \
+ --instance-info image_os_hash_algo=sha512 \
+ --instance-info image_os_hash_value=<SHA512 of the raw image> \
+ --instance-info image_disk_format=raw
+
+To disable this feature and cache images in the node's RAM, set
+
+.. code-block:: ini
+
+ [agent]
+ stream_raw_images = False
+
+To disable the conductor-side conversion completely, set
+
+.. code-block:: ini
+
+ [DEFAULT]
+ force_raw_images = False
+
.. _ansible-deploy:
Ansible deploy
diff --git a/doc/source/admin/metrics.rst b/doc/source/admin/metrics.rst
index f435a50c5..733c6569b 100644
--- a/doc/source/admin/metrics.rst
+++ b/doc/source/admin/metrics.rst
@@ -17,8 +17,11 @@ These performance measurements, herein referred to as "metrics", can be
emitted from the Bare Metal service, including ironic-api, ironic-conductor,
and ironic-python-agent. By default, none of the services will emit metrics.
-Configuring the Bare Metal Service to Enable Metrics
-====================================================
+It is important to stress that statsd is not the only supported backend for
+metrics collection and transmission. The alternatives are covered later in
+this document.
+
+Configuring the Bare Metal Service to Enable Metrics with Statsd
+================================================================
Enabling metrics in ironic-api and ironic-conductor
---------------------------------------------------
@@ -62,6 +65,30 @@ in the ironic configuration file as well::
agent_statsd_host = 198.51.100.2
agent_statsd_port = 8125
+.. Note::
+ Use of a different metrics backend with the agent is not presently
+ supported.
+
+Transmission to the Message Bus Notifier
+========================================
+
+Regardless of whether you're using Ceilometer,
+`ironic-prometheus-exporter <https://docs.openstack.org/ironic-prometheus-exporter/latest/>`_,
+or your own scripting to consume the message bus notifications, metrics data
+can be sent to the message bus notifier from the timer methods *and*
+additional gauge counters by setting the ``[metrics]backend`` configuration
+option to ``collector``. When this is the case, information is cached locally
+and periodically sent to the messaging notifier along with the general sensor
+data update, where it can be consumed off of the message bus or via a
+notifier plugin (such as is done with ironic-prometheus-exporter).
+
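+For example, a minimal snippet enabling this behavior (the option name and
+value are taken from the description above) might look like:
+
+.. code-block:: ini
+
+   [metrics]
+   backend = collector
+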
+.. NOTE::
+ Transmission of timer data only works for the Conductor or ``single-process``
+ Ironic service model. A separate webserver process presently does not have
+ the capability of triggering the call to retrieve and transmit the data.
+
+.. NOTE::
+   This functionality requires ironic-lib version 5.4.0 or newer to be
+   installed.
Types of Metrics Emitted
========================
@@ -79,6 +106,9 @@ additional load before enabling metrics. To see which metrics have changed names
or have been removed between releases, refer to the `ironic release notes
<https://docs.openstack.org/releasenotes/ironic/>`_.
+Additional conductor metrics in the form of counts will also be generated in
+a limited number of locations where pertinent to the activity of the
+conductor.
+
.. note::
With the default statsd configuration, each timing metric may create
additional metrics due to how statsd handles timing metrics. For more
diff --git a/doc/source/install/enrollment.rst b/doc/source/install/enrollment.rst
index 9f9355d75..40c63b4bb 100644
--- a/doc/source/install/enrollment.rst
+++ b/doc/source/install/enrollment.rst
@@ -81,15 +81,9 @@ affected, since the initial provision state is still ``available``.
However, using API version 1.11 or above may break existing automation tooling
with respect to node creation.
-The default API version used by (the most recent) python-ironicclient is 1.9,
-but it may change in the future and should not be relied on.
-
-In the examples below we will use version 1.11 of the Bare metal API.
-This gives us the following advantages:
-
-* Explicit power credentials validation before leaving the ``enroll`` state.
-* Running node cleaning before entering the ``available`` state.
-* Not exposing half-configured nodes to the scheduler.
+The ``openstack baremetal`` command line tool tries to negotiate the most
+recent supported version, which in virtually all cases will be newer than
+1.11.
To set the API version for all commands, you can set the environment variable
``IRONIC_API_VERSION``. For the OpenStackClient baremetal plugin, set
@@ -118,7 +112,6 @@ and may be combined if desired.
.. code-block:: console
- $ export OS_BAREMETAL_API_VERSION=1.11
$ baremetal node create --driver ipmi
+--------------+--------------------------------------+
| Property | Value |
@@ -423,12 +416,13 @@ Validating node information
Making node available for deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In order for nodes to be available for deploying workloads on them, nodes
-must be in the ``available`` provision state. To do this, nodes
-created with API version 1.11 and above must be moved from the ``enroll`` state
-to the ``manageable`` state and then to the ``available`` state.
-This section can be safely skipped, if API version 1.10 or earlier is used
-(which is the case by default).
+In order for nodes to be available for deploying workloads on them, nodes must
+be in the ``available`` provision state. To do this, nodes must be moved from
+the ``enroll`` state to the ``manageable`` state and then to the ``available``
+state.
+
+.. note::
+   This section can be skipped if API version 1.10 or earlier is used.
After creating a node and before moving it from its initial provision state of
``enroll``, basic power and port information needs to be configured on the node.
diff --git a/doc/source/install/refarch/common.rst b/doc/source/install/refarch/common.rst
index 800632fd5..ce0dedfb1 100644
--- a/doc/source/install/refarch/common.rst
+++ b/doc/source/install/refarch/common.rst
@@ -277,9 +277,8 @@ the space requirements are different:
In both cases a cached image is converted to raw if ``force_raw_images``
is ``True`` (the default).
- .. note::
- ``image_download_source`` can also be provided in the node's
- ``driver_info`` or ``instance_info``. See :ref:`image_download_source`.
+ See :ref:`image_download_source` and :ref:`stream_raw_images` for more
+ details.
* When network boot is used, the instance image kernel and ramdisk are cached
locally while the instance is active.
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index 9bd9af985..c352761c7 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -59,6 +59,27 @@ def max_version():
versions.min_version_string(), versions.max_version_string())
+def make_controller_links(name):
+ return [
+ link.make_link('self', api.request.public_url, name, ''),
+ link.make_link('bookmark', api.request.public_url, name, '',
+ bookmark=True)
+ ]
+
+
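+# Top-level controllers exposed conditionally, mapped to the function that
+# checks whether the requested API version supports them.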
+VERSIONED_CONTROLLERS = {
+ 'portgroups': utils.allow_portgroups,
+ 'volume': utils.allow_volume,
+ 'lookup': utils.allow_ramdisk_endpoints,
+ 'heartbeat': utils.allow_ramdisk_endpoints,
+ 'conductors': utils.allow_expose_conductors,
+ 'allocations': utils.allow_allocations,
+ 'events': utils.allow_expose_events,
+ 'deploy_templates': utils.allow_deploy_templates,
+ 'shards': utils.allow_shards_endpoint,
+}
+
+
def v1():
v1 = {
'id': "v1",
@@ -75,124 +96,15 @@ def v1():
'base': 'application/json',
'type': 'application/vnd.openstack.ironic.v1+json'
},
- 'chassis': [
- link.make_link('self', api.request.public_url,
- 'chassis', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'chassis', '',
- bookmark=True)
- ],
- 'nodes': [
- link.make_link('self', api.request.public_url,
- 'nodes', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'nodes', '',
- bookmark=True)
- ],
- 'ports': [
- link.make_link('self', api.request.public_url,
- 'ports', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'ports', '',
- bookmark=True)
- ],
- 'drivers': [
- link.make_link('self', api.request.public_url,
- 'drivers', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'drivers', '',
- bookmark=True)
- ],
- 'version': version.default_version()
+ 'chassis': make_controller_links('chassis'),
+ 'nodes': make_controller_links('nodes'),
+ 'ports': make_controller_links('ports'),
+ 'drivers': make_controller_links('drivers'),
+ 'version': version.default_version(),
}
- if utils.allow_portgroups():
- v1['portgroups'] = [
- link.make_link('self', api.request.public_url,
- 'portgroups', ''),
- link.make_link('bookmark', api.request.public_url,
- 'portgroups', '', bookmark=True)
- ]
- if utils.allow_volume():
- v1['volume'] = [
- link.make_link('self',
- api.request.public_url,
- 'volume', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'volume', '',
- bookmark=True)
- ]
- if utils.allow_ramdisk_endpoints():
- v1['lookup'] = [
- link.make_link('self', api.request.public_url,
- 'lookup', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'lookup', '',
- bookmark=True)
- ]
- v1['heartbeat'] = [
- link.make_link('self',
- api.request.public_url,
- 'heartbeat', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'heartbeat', '',
- bookmark=True)
- ]
- if utils.allow_expose_conductors():
- v1['conductors'] = [
- link.make_link('self',
- api.request.public_url,
- 'conductors', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'conductors', '',
- bookmark=True)
- ]
- if utils.allow_allocations():
- v1['allocations'] = [
- link.make_link('self',
- api.request.public_url,
- 'allocations', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'allocations', '',
- bookmark=True)
- ]
- if utils.allow_expose_events():
- v1['events'] = [
- link.make_link('self', api.request.public_url,
- 'events', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'events', '',
- bookmark=True)
- ]
- if utils.allow_deploy_templates():
- v1['deploy_templates'] = [
- link.make_link('self',
- api.request.public_url,
- 'deploy_templates', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'deploy_templates', '',
- bookmark=True)
- ]
- if utils.allow_shards_endpoint():
- v1['shards'] = [
- link.make_link('self',
- api.request.public_url,
- 'shards', ''),
- link.make_link('bookmark',
- api.request.public_url,
- 'shards', '',
- bookmark=True)
- ]
+ for link_name, check_func in VERSIONED_CONTROLLERS.items():
+ if check_func():
+ v1[link_name] = make_controller_links(link_name)
return v1
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index d21b075c8..65adee544 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -1600,6 +1600,10 @@ def node_sanitize(node, fields, cdict=None,
node['driver_info'] = strutils.mask_dict_password(
node['driver_info'], "******")
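+        # SNMPv3 credentials are not password-named fields, so (presumably)
+        # mask_dict_password above does not catch them; mask them explicitly.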
+ _mask_fields(node['driver_info'],
+ ['snmp_auth_key', 'snmp_priv_key'],
+ "******")
+
if not show_instance_secrets and 'instance_info' in node_keys:
node['instance_info'] = strutils.mask_dict_password(
node['instance_info'], "******")
@@ -1663,6 +1667,12 @@ def _node_sanitize_extended(node, node_keys, target_dict, cdict):
'driver_internal_info permission. **'}
+def _mask_fields(dictionary, fields, secret):
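+    """Replace the values of the given fields with a mask, if set."""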
+ for field in fields:
+ if dictionary.get(field):
+ dictionary[field] = secret
+
+
def node_list_convert_with_links(nodes, limit, url, fields=None, **kwargs):
cdict = api.request.context.to_policy_values()
target_dict = dict(cdict)
@@ -1964,8 +1974,7 @@ class NodeInventoryController(rest.RestController):
"""
node = api_utils.check_node_policy_and_retrieve(
'baremetal:node:inventory:get', self.node_ident)
- return inspect_utils.get_introspection_data(node,
- api.request.context)
+ return inspect_utils.get_inspection_data(node, api.request.context)
class NodesController(rest.RestController):
diff --git a/ironic/api/controllers/v1/ramdisk.py b/ironic/api/controllers/v1/ramdisk.py
index 5feef5e02..b98eb7dc2 100644
--- a/ironic/api/controllers/v1/ramdisk.py
+++ b/ironic/api/controllers/v1/ramdisk.py
@@ -131,13 +131,17 @@ class LookupController(rest.RestController):
else:
node = objects.Node.get_by_port_addresses(
api.request.context, valid_addresses)
- except exception.NotFound:
+ except exception.NotFound as e:
# NOTE(dtantsur): we are reraising the same exception to make sure
# we don't disclose the difference between nodes that are not found
# at all and nodes in a wrong state by different error messages.
+ LOG.error('No node has been found during lookup: %s', e)
raise exception.NotFound()
if CONF.api.restrict_lookup and not self.lookup_allowed(node):
+ LOG.error('Lookup is not allowed for node %(node)s in the '
+ 'provision state %(state)s',
+ {'node': node.uuid, 'state': node.provision_state})
raise exception.NotFound()
if api_utils.allow_agent_token():
diff --git a/ironic/common/kickstart_utils.py b/ironic/common/kickstart_utils.py
index 433cf2390..4e02e2ea7 100644
--- a/ironic/common/kickstart_utils.py
+++ b/ironic/common/kickstart_utils.py
@@ -23,6 +23,7 @@ import pycdlib
import requests
from ironic.common import exception
+from ironic.conf import CONF
LOG = logging.getLogger(__name__)
@@ -107,7 +108,8 @@ def decode_and_extract_config_drive_iso(config_drive_iso_gz):
def _fetch_config_drive_from_url(url):
try:
- config_drive = requests.get(url).content
+ config_drive = requests.get(
+ url, timeout=CONF.webserver_connection_timeout).content
except requests.exceptions.RequestException as e:
raise exception.InstanceDeployFailure(
"Can't download the configdrive content from '%(url)s'. "
diff --git a/ironic/common/molds.py b/ironic/common/molds.py
index 234fcc6e3..a77e42a63 100644
--- a/ironic/common/molds.py
+++ b/ironic/common/molds.py
@@ -49,7 +49,8 @@ def save_configuration(task, url, data):
)
def _request(url, data, auth_header):
return requests.put(
- url, data=json.dumps(data, indent=2), headers=auth_header)
+ url, data=json.dumps(data, indent=2), headers=auth_header,
+ timeout=CONF.webserver_connection_timeout)
auth_header = _get_auth_header(task)
response = _request(url, data, auth_header)
@@ -76,7 +77,8 @@ def get_configuration(task, url):
reraise=True
)
def _request(url, auth_header):
- return requests.get(url, headers=auth_header)
+ return requests.get(url, headers=auth_header,
+ timeout=CONF.webserver_connection_timeout)
auth_header = _get_auth_header(task)
response = _request(url, auth_header)
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index 7162ca4d3..eb0eb7956 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -510,6 +510,69 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
+ '21.2': {
+ 'api': '1.80',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.36'],
+ 'NodeHistory': ['1.0'],
+ 'NodeInventory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.10'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
+ '21.3': {
+ 'api': '1.81',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.36'],
+ 'NodeHistory': ['1.0'],
+ 'NodeInventory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.11'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
+ '21.4': {
+ 'api': '1.82',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.37'],
+ 'NodeHistory': ['1.0'],
+ 'NodeInventory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.11'],
+ 'Portgroup': ['1.5'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
'master': {
'api': '1.82',
'rpc': '1.55',
@@ -543,12 +606,11 @@ RELEASE_MAPPING = {
#
# Just after we do a new named release, delete the oldest named
# release (that we are no longer supporting for a rolling upgrade).
-#
-# There should be at most two named mappings here.
-# NOTE(mgoddard): remove yoga prior to the antelope release.
RELEASE_MAPPING['yoga'] = RELEASE_MAPPING['20.1']
RELEASE_MAPPING['zed'] = RELEASE_MAPPING['21.1']
+RELEASE_MAPPING['antelope'] = RELEASE_MAPPING['21.4']
+RELEASE_MAPPING['2023.1'] = RELEASE_MAPPING['21.4']
# List of available versions with named versions first; 'master' is excluded.
RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) - {'master'}, reverse=True)
diff --git a/ironic/common/states.py b/ironic/common/states.py
index 89b710189..f2238b41b 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -269,6 +269,9 @@ _FASTTRACK_LOOKUP_ALLOWED_STATES = (ENROLL, MANAGEABLE, AVAILABLE,
FASTTRACK_LOOKUP_ALLOWED_STATES = frozenset(_FASTTRACK_LOOKUP_ALLOWED_STATES)
"""States where API lookups are permitted with fast track enabled."""
+FAILURE_STATES = frozenset((DEPLOYFAIL, CLEANFAIL, INSPECTFAIL,
+ RESCUEFAIL, UNRESCUEFAIL, ADOPTFAIL))
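+"""States that indicate a failure in the preceding operation."""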
+
##############
# Power states
diff --git a/ironic/common/utils.py b/ironic/common/utils.py
index 9ae88d4d6..793b4b501 100644
--- a/ironic/common/utils.py
+++ b/ironic/common/utils.py
@@ -26,6 +26,7 @@ import hashlib
import ipaddress
import os
import re
+import shlex
import shutil
import tempfile
import time
@@ -696,3 +697,30 @@ def stop_after_retries(option, group=None):
return retry_state.attempt_number >= num_retries + 1
return should_stop
+
+
+def is_loopback(hostname_or_ip):
+ """Check if the provided hostname or IP address is a loopback."""
+ try:
+ return ipaddress.ip_address(hostname_or_ip).is_loopback
+ except ValueError: # host name
+ return hostname_or_ip in ('localhost', 'localhost.localdomain')
+
+
+def parse_kernel_params(params):
+ """Parse kernel parameters into a dictionary.
+
+ ``None`` is used as a value for parameters that are not in
+ the ``key=value`` format.
+
+ :param params: kernel parameters as a space-delimited string.
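+
+    For example, parsing ``console=ttyS0,115200 nofb`` yields
+    ``{'console': 'ttyS0,115200', 'nofb': None}``.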
+ """
+ result = {}
+ for s in shlex.split(params):
+ try:
+ key, value = s.split('=', 1)
+ except ValueError:
+ result[s] = None
+ else:
+ result[key] = value
+ return result
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
index e59841a99..9e4edb809 100644
--- a/ironic/conductor/cleaning.py
+++ b/ironic/conductor/cleaning.py
@@ -248,12 +248,21 @@ def do_next_clean_step(task, step_index, disable_ramdisk=None):
task.process_event(event)
+def get_last_error(node):
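+    """Build the last_error message for an aborted cleaning operation."""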
+ last_error = _('By request, the clean operation was aborted')
+ if node.clean_step:
+ last_error += (
+ _(' during or after the completion of step "%s"')
+ % conductor_steps.step_id(node.clean_step)
+ )
+ return last_error
+
+
@task_manager.require_exclusive_lock
-def do_node_clean_abort(task, step_name=None):
+def do_node_clean_abort(task):
"""Internal method to abort an ongoing operation.
:param task: a TaskManager instance with an exclusive lock
- :param step_name: The name of the clean step.
"""
node = task.node
try:
@@ -271,12 +280,13 @@ def do_node_clean_abort(task, step_name=None):
set_fail_state=False)
return
+ last_error = get_last_error(node)
info_message = _('Clean operation aborted for node %s') % node.uuid
- last_error = _('By request, the clean operation was aborted')
- if step_name:
- msg = _(' after the completion of step "%s"') % step_name
- last_error += msg
- info_message += msg
+ if node.clean_step:
+ info_message += (
+ _(' during or after the completion of step "%s"')
+            % conductor_steps.step_id(node.clean_step)
+ )
node.last_error = last_error
node.clean_step = None
@@ -318,7 +328,7 @@ def continue_node_clean(task):
target_state = None
task.process_event('fail', target_state=target_state)
- do_node_clean_abort(task, step_name)
+ do_node_clean_abort(task)
return
LOG.debug('The cleaning operation for node %(node)s was '
diff --git a/ironic/conductor/inspection.py b/ironic/conductor/inspection.py
new file mode 100644
index 000000000..53c76e99d
--- /dev/null
+++ b/ironic/conductor/inspection.py
@@ -0,0 +1,108 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Inspection implementation for the conductor."""
+
+from oslo_log import log
+from oslo_utils import excutils
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.conductor import utils
+
+LOG = log.getLogger(__name__)
+
+
+@task_manager.require_exclusive_lock
+def inspect_hardware(task):
+ """Initiates inspection.
+
+ :param task: a TaskManager instance with an exclusive lock
+ on its node.
+ :raises: HardwareInspectionFailure if driver doesn't
+ return the state as states.MANAGEABLE, states.INSPECTWAIT.
+
+ """
+ node = task.node
+
+ def handle_failure(e, log_func=LOG.error):
+ utils.node_history_record(task.node, event=e,
+ event_type=states.INTROSPECTION,
+ error=True, user=task.context.user_id)
+ task.process_event('fail')
+ log_func("Failed to inspect node %(node)s: %(err)s",
+ {'node': node.uuid, 'err': e})
+
+ # Inspection cannot start in fast-track mode, wipe token and URL.
+ utils.wipe_token_and_url(task)
+
+ try:
+ new_state = task.driver.inspect.inspect_hardware(task)
+ except exception.IronicException as e:
+ with excutils.save_and_reraise_exception():
+ error = str(e)
+ handle_failure(error)
+ except Exception as e:
+ error = (_('Unexpected exception of type %(type)s: %(msg)s') %
+ {'type': type(e).__name__, 'msg': e})
+ handle_failure(error, log_func=LOG.exception)
+ raise exception.HardwareInspectionFailure(error=error)
+
+ if new_state == states.MANAGEABLE:
+ task.process_event('done')
+ LOG.info('Successfully inspected node %(node)s',
+ {'node': node.uuid})
+ elif new_state == states.INSPECTWAIT:
+ task.process_event('wait')
+ LOG.info('Successfully started introspection on node %(node)s',
+ {'node': node.uuid})
+ else:
+ error = (_("During inspection, driver returned unexpected "
+ "state %(state)s") % {'state': new_state})
+ handle_failure(error)
+ raise exception.HardwareInspectionFailure(error=error)
+
+
+@task_manager.require_exclusive_lock
+def abort_inspection(task):
+ """Abort inspection for the node."""
+ node = task.node
+
+ try:
+ task.driver.inspect.abort(task)
+ except exception.UnsupportedDriverExtension:
+ with excutils.save_and_reraise_exception():
+ LOG.error('Inspect interface "%(intf)s" does not support abort '
+ 'operation for node %(node)s',
+ {'intf': node.inspect_interface, 'node': node.uuid})
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Error when aborting inspection of node %(node)s',
+ {'node': node.uuid})
+ error = _('Failed to abort inspection: %s') % e
+ utils.node_history_record(task.node, event=error,
+ event_type=states.INTROSPECTION,
+ error=True,
+ user=task.context.user_id)
+ node.save()
+
+ error = _('Inspection was aborted by request.')
+ utils.node_history_record(task.node, event=error,
+ event_type=states.INTROSPECTION,
+ error=True,
+ user=task.context.user_id)
+ utils.wipe_token_and_url(task)
+ task.process_event('abort')
+ LOG.info('Successfully aborted inspection of node %(node)s',
+ {'node': node.uuid})
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index ad45d2d74..bbd2355bd 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -63,6 +63,7 @@ from ironic.conductor import allocations
from ironic.conductor import base_manager
from ironic.conductor import cleaning
from ironic.conductor import deployments
+from ironic.conductor import inspection
from ironic.conductor import notification_utils as notify_utils
from ironic.conductor import periodics
from ironic.conductor import steps as conductor_steps
@@ -98,6 +99,8 @@ class ConductorManager(base_manager.BaseConductorManager):
def __init__(self, host, topic):
super(ConductorManager, self).__init__(host, topic)
+        # NOTE(TheJulia): This is less a metric-able count and more a means
+        # to sort out nodes and prioritise a subset (of non-responding
+        # nodes).
self.power_state_sync_count = collections.defaultdict(int)
@METRICS.timer('ConductorManager._clean_up_caches')
@@ -1349,7 +1352,8 @@ class ConductorManager(base_manager.BaseConductorManager):
callback=self._spawn_worker,
call_args=(cleaning.do_node_clean_abort, task),
err_handler=utils.provisioning_error_handler,
- target_state=target_state)
+ target_state=target_state,
+ last_error=cleaning.get_last_error(node))
return
if node.provision_state == states.RESCUEWAIT:
@@ -1362,35 +1366,7 @@ class ConductorManager(base_manager.BaseConductorManager):
return
if node.provision_state == states.INSPECTWAIT:
- try:
- task.driver.inspect.abort(task)
- except exception.UnsupportedDriverExtension:
- with excutils.save_and_reraise_exception():
- intf_name = task.driver.inspect.__class__.__name__
- LOG.error('Inspect interface %(intf)s does not '
- 'support abort operation when aborting '
- 'inspection of node %(node)s',
- {'intf': intf_name, 'node': node.uuid})
- except Exception as e:
- with excutils.save_and_reraise_exception():
- LOG.exception('Error in aborting the inspection of '
- 'node %(node)s', {'node': node.uuid})
- error = _('Failed to abort inspection: %s') % e
- utils.node_history_record(task.node, event=error,
- event_type=states.INTROSPECTION,
- error=True,
- user=task.context.user_id)
- node.save()
- error = _('Inspection was aborted by request.')
- utils.node_history_record(task.node, event=error,
- event_type=states.INTROSPECTION,
- error=True,
- user=task.context.user_id)
- utils.wipe_token_and_url(task)
- task.process_event('abort')
- LOG.info('Successfully aborted inspection of node %(node)s',
- {'node': node.uuid})
- return
+ return inspection.abort_inspection(task)
@METRICS.timer('ConductorManager._sync_power_states')
@periodics.periodic(spacing=CONF.conductor.sync_power_state_interval,
@@ -1433,6 +1409,11 @@ class ConductorManager(base_manager.BaseConductorManager):
finally:
waiters.wait_for_all(futures)
+ # report a count of the nodes
+ METRICS.send_gauge(
+ 'ConductorManager.PowerSyncNodesCount',
+ len(nodes))
+
def _sync_power_state_nodes_task(self, context, nodes):
"""Invokes power state sync on nodes from synchronized queue.
@@ -1451,6 +1432,7 @@ class ConductorManager(base_manager.BaseConductorManager):
can do here to avoid failing a brand new deploy to a node that
we've locked here, though.
"""
+
# FIXME(comstud): Since our initial state checks are outside
# of the lock (to try to avoid the lock), some checks are
# repeated after grabbing the lock so we can unlock quickly.
@@ -1497,6 +1479,12 @@ class ConductorManager(base_manager.BaseConductorManager):
LOG.info("During sync_power_state, node %(node)s was not "
"found and presumed deleted by another process.",
{'node': node_uuid})
+            # TODO(TheJulia): The chance exists that we orphan a node
+            # in power_state_sync_count. Albeit not much data, it could
+            # eventually cause the memory footprint to grow on an
+            # exceptionally large ironic deployment. We should make sure
+            # we clean it up at some point, but overall, given the minimal
+            # impact, it is definitely low-hanging fruit.
except exception.NodeLocked:
LOG.info("During sync_power_state, node %(node)s was "
"already locked by another process. Skip.",
@@ -1513,6 +1501,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# regular power state checking, maintenance is still a required
# condition.
filters={'maintenance': True, 'fault': faults.POWER_FAILURE},
+ node_count_metric_name='ConductorManager.PowerSyncRecoveryNodeCount',
)
def _power_failure_recovery(self, task, context):
"""Periodic task to check power states for nodes in maintenance.
@@ -1855,6 +1844,7 @@ class ConductorManager(base_manager.BaseConductorManager):
predicate=lambda n, m: n.conductor_affinity != m.conductor.id,
limit=lambda: CONF.conductor.periodic_max_workers,
shared_task=False,
+ node_count_metric_name='ConductorManager.SyncLocalStateNodeCount',
)
def _sync_local_state(self, task, context):
"""Perform any actions necessary to sync local state.
@@ -2640,14 +2630,63 @@ class ConductorManager(base_manager.BaseConductorManager):
# Yield on every iteration
eventlet.sleep(0)
+ def _sensors_conductor(self, context):
+ """Called to collect and send metrics "sensors" for the conductor."""
+ # populate the message which will be sent to ceilometer
+ # or other data consumer
+ message = {'message_id': uuidutils.generate_uuid(),
+ 'timestamp': datetime.datetime.utcnow(),
+ 'hostname': self.host}
+
+ try:
+ ev_type = 'ironic.metrics'
+ message['event_type'] = ev_type + '.update'
+ sensors_data = METRICS.get_metrics_data()
+ except AttributeError:
+ # TODO(TheJulia): Remove this at some point, but right now
+ # don't inherently break on version mismatches when people
+            # disregard requirements.
+ LOG.warning(
+ 'get_sensors_data has been configured to collect '
+ 'conductor metrics, however the installed ironic-lib '
+ 'library lacks the functionality. Please update '
+ 'ironic-lib to a minimum of version 5.4.0.')
+ except Exception as e:
+ LOG.exception(
+                "An unknown error occurred while attempting to collect "
+ "sensor data from within the conductor. Error: %(error)s",
+ {'error': e})
+ else:
+ message['payload'] = (
+ self._filter_out_unsupported_types(sensors_data))
+ if message['payload']:
+ self.sensors_notifier.info(
+ context, ev_type, message)
+
@METRICS.timer('ConductorManager._send_sensor_data')
- @periodics.periodic(spacing=CONF.conductor.send_sensor_data_interval,
- enabled=CONF.conductor.send_sensor_data)
+ @periodics.periodic(spacing=CONF.sensor_data.interval,
+ enabled=CONF.sensor_data.send_sensor_data)
def _send_sensor_data(self, context):
"""Periodically collects and transmits sensor data notifications."""
+ if CONF.sensor_data.enable_for_conductor:
+ if CONF.sensor_data.workers == 1:
+ # Directly call the sensors_conductor when only one
+ # worker is permitted, so we collect data serially
+ # instead.
+ self._sensors_conductor(context)
+ else:
+                # Also, do not apply the general threshold limit to
+                # the self collection of "sensor" data from the conductor:
+                # we're not launching external processes, we're just reading
+                # from an internal data structure, if we can.
+ self._spawn_worker(self._sensors_conductor, context)
+ if not CONF.sensor_data.enable_for_nodes:
+ # NOTE(TheJulia): If node sensor data is not required, then
+ # skip the rest of this method.
+ return
filters = {}
- if not CONF.conductor.send_sensor_data_for_undeployed_nodes:
+ if not CONF.sensor_data.enable_for_undeployed_nodes:
filters['provision_state'] = states.ACTIVE
nodes = queue.Queue()
@@ -2655,7 +2694,7 @@ class ConductorManager(base_manager.BaseConductorManager):
filters=filters):
nodes.put_nowait(node_info)
- number_of_threads = min(CONF.conductor.send_sensor_data_workers,
+ number_of_threads = min(CONF.sensor_data.workers,
nodes.qsize())
futures = []
for thread_number in range(number_of_threads):
@@ -2671,7 +2710,7 @@ class ConductorManager(base_manager.BaseConductorManager):
break
done, not_done = waiters.wait_for_all(
- futures, timeout=CONF.conductor.send_sensor_data_wait_timeout)
+ futures, timeout=CONF.sensor_data.wait_timeout)
if not_done:
LOG.warning("%d workers for send sensors data did not complete",
len(not_done))
@@ -2680,13 +2719,14 @@ class ConductorManager(base_manager.BaseConductorManager):
"""Filters out sensor data types that aren't specified in the config.
Removes sensor data types that aren't specified in
- CONF.conductor.send_sensor_data_types.
+ CONF.sensor_data.data_types.
:param sensors_data: dict containing sensor types and the associated
data
:returns: dict with unsupported sensor types removed
"""
- allowed = set(x.lower() for x in CONF.conductor.send_sensor_data_types)
+ allowed = set(x.lower() for x in
+ CONF.sensor_data.data_types)
if 'all' in allowed:
return sensors_data
@@ -2971,7 +3011,7 @@ class ConductorManager(base_manager.BaseConductorManager):
task.process_event(
'inspect',
callback=self._spawn_worker,
- call_args=(_do_inspect_hardware, task),
+ call_args=(inspection.inspect_hardware, task),
err_handler=utils.provisioning_error_handler)
except exception.InvalidState:
@@ -3791,53 +3831,3 @@ def do_sync_power_state(task, count):
task, old_power_state)
return count
-
-
-@task_manager.require_exclusive_lock
-def _do_inspect_hardware(task):
- """Initiates inspection.
-
- :param task: a TaskManager instance with an exclusive lock
- on its node.
- :raises: HardwareInspectionFailure if driver doesn't
- return the state as states.MANAGEABLE, states.INSPECTWAIT.
-
- """
- node = task.node
-
- def handle_failure(e, log_func=LOG.error):
- utils.node_history_record(task.node, event=e,
- event_type=states.INTROSPECTION,
- error=True, user=task.context.user_id)
- task.process_event('fail')
- log_func("Failed to inspect node %(node)s: %(err)s",
- {'node': node.uuid, 'err': e})
-
- # Inspection cannot start in fast-track mode, wipe token and URL.
- utils.wipe_token_and_url(task)
-
- try:
- new_state = task.driver.inspect.inspect_hardware(task)
- except exception.IronicException as e:
- with excutils.save_and_reraise_exception():
- error = str(e)
- handle_failure(error)
- except Exception as e:
- error = (_('Unexpected exception of type %(type)s: %(msg)s') %
- {'type': type(e).__name__, 'msg': e})
- handle_failure(error, log_func=LOG.exception)
- raise exception.HardwareInspectionFailure(error=error)
-
- if new_state == states.MANAGEABLE:
- task.process_event('done')
- LOG.info('Successfully inspected node %(node)s',
- {'node': node.uuid})
- elif new_state == states.INSPECTWAIT:
- task.process_event('wait')
- LOG.info('Successfully started introspection on node %(node)s',
- {'node': node.uuid})
- else:
- error = (_("During inspection, driver returned unexpected "
- "state %(state)s") % {'state': new_state})
- handle_failure(error)
- raise exception.HardwareInspectionFailure(error=error)
diff --git a/ironic/conductor/periodics.py b/ironic/conductor/periodics.py
index 70bc7bc93..b9c8f8844 100644
--- a/ironic/conductor/periodics.py
+++ b/ironic/conductor/periodics.py
@@ -18,6 +18,7 @@ import inspect
import eventlet
from futurist import periodics
+from ironic_lib import metrics_utils
from oslo_log import log
from ironic.common import exception
@@ -29,6 +30,9 @@ from ironic.drivers import base as driver_base
LOG = log.getLogger(__name__)
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+
def periodic(spacing, enabled=True, **kwargs):
"""A decorator to define a periodic task.
@@ -46,7 +50,7 @@ class Stop(Exception):
def node_periodic(purpose, spacing, enabled=True, filters=None,
predicate=None, predicate_extra_fields=(), limit=None,
- shared_task=True):
+ shared_task=True, node_count_metric_name=None):
"""A decorator to define a periodic task to act on nodes.
Defines a periodic task that fetches the list of nodes mapped to the
@@ -84,6 +88,9 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
iteration to determine the limit.
:param shared_task: if ``True``, the task will have a shared lock. It is
recommended to start with a shared lock and upgrade it only if needed.
+ :param node_count_metric_name: A string value to identify a metric
+ representing the count of matching nodes to be recorded upon the
+ completion of the periodic.
"""
node_type = collections.namedtuple(
'Node',
@@ -116,10 +123,11 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
else:
local_limit = limit
assert local_limit is None or local_limit > 0
-
+ node_count = 0
nodes = manager.iter_nodes(filters=filters,
fields=predicate_extra_fields)
for (node_uuid, *other) in nodes:
+ node_count += 1
if predicate is not None:
node = node_type(node_uuid, *other)
if accepts_manager:
@@ -158,6 +166,11 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
local_limit -= 1
if not local_limit:
return
+ if node_count_metric_name:
+ # Send post-run metrics.
+ METRICS.send_gauge(
+ node_count_metric_name,
+ node_count)
return wrapper
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 509c9ce92..922e74cf6 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -527,7 +527,8 @@ class TaskManager(object):
self.release_resources()
def process_event(self, event, callback=None, call_args=None,
- call_kwargs=None, err_handler=None, target_state=None):
+ call_kwargs=None, err_handler=None, target_state=None,
+ last_error=None):
"""Process the given event for the task's current state.
:param event: the name of the event to process
@@ -540,6 +541,8 @@ class TaskManager(object):
prev_target_state)
:param target_state: if specified, the target provision state for the
node. Otherwise, use the target state from the fsm
+ :param last_error: last error to set on the node together with
+ the state transition.
:raises: InvalidState if the event is not allowed by the associated
state machine
"""
@@ -572,13 +575,15 @@ class TaskManager(object):
# set up the async worker
if callback:
- # clear the error if we're going to start work in a callback
- self.node.last_error = None
+ # update the error if we're going to start work in a callback
+ self.node.last_error = last_error
if call_args is None:
call_args = ()
if call_kwargs is None:
call_kwargs = {}
self.spawn_after(callback, *call_args, **call_kwargs)
+ elif last_error is not None:
+ self.node.last_error = last_error
# publish the state transition by saving the Node
self.node.save()
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index c107f076f..2272c0df7 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -297,14 +297,23 @@ def node_power_action(task, new_state, timeout=None):
node = task.node
if _can_skip_state_change(task, new_state):
+ # NOTE(TheJulia): Even if we are not changing the power state,
+ # we need to wipe the token out, just in case for some reason
+ # the power was turned off outside of our interaction/management.
+ if new_state in (states.POWER_OFF, states.SOFT_POWER_OFF,
+ states.REBOOT, states.SOFT_REBOOT):
+ wipe_internal_info_on_power_off(node)
+ node.save()
return
target_state = _calculate_target_state(new_state)
# Set the target_power_state and clear any last_error, if we're
# starting a new operation. This will expose to other processes
- # and clients that work is in progress.
- node['target_power_state'] = target_state
- node['last_error'] = None
+ # and clients that work is in progress. Keep the last_error intact
+ # if the power action happens as a result of a failure.
+ node.target_power_state = target_state
+ if node.provision_state not in states.FAILURE_STATES:
+ node.last_error = None
node.timestamp_driver_internal_info('last_power_state_change')
# NOTE(dtantsur): wipe token on shutting down, otherwise a reboot in
# fast-track (or an accidentally booted agent) will cause subsequent
@@ -479,9 +488,9 @@ def cleaning_error_handler(task, logmsg, errmsg=None, traceback=False,
node.del_driver_internal_info('cleaning_reboot')
node.del_driver_internal_info('cleaning_polling')
node.del_driver_internal_info('skip_current_clean_step')
- # We don't need to keep the old agent URL
+ # We don't need to keep the old agent URL, or token
# as it should change upon the next cleaning attempt.
- node.del_driver_internal_info('agent_url')
+ wipe_token_and_url(task)
# For manual cleaning, the target provision state is MANAGEABLE, whereas
# for automated cleaning, it is AVAILABLE.
manual_clean = node.target_provision_state == states.MANAGEABLE
diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py
index c1a893181..648395362 100644
--- a/ironic/conf/__init__.py
+++ b/ironic/conf/__init__.py
@@ -29,6 +29,7 @@ from ironic.conf import deploy
from ironic.conf import dhcp
from ironic.conf import dnsmasq
from ironic.conf import drac
+from ironic.conf import fake
from ironic.conf import glance
from ironic.conf import healthcheck
from ironic.conf import ibmc
@@ -44,6 +45,7 @@ from ironic.conf import neutron
from ironic.conf import nova
from ironic.conf import pxe
from ironic.conf import redfish
+from ironic.conf import sensor_data
from ironic.conf import service_catalog
from ironic.conf import snmp
from ironic.conf import swift
@@ -65,6 +67,7 @@ deploy.register_opts(CONF)
drac.register_opts(CONF)
dhcp.register_opts(CONF)
dnsmasq.register_opts(CONF)
+fake.register_opts(CONF)
glance.register_opts(CONF)
healthcheck.register_opts(CONF)
ibmc.register_opts(CONF)
@@ -80,6 +83,7 @@ neutron.register_opts(CONF)
nova.register_opts(CONF)
pxe.register_opts(CONF)
redfish.register_opts(CONF)
+sensor_data.register_opts(CONF)
service_catalog.register_opts(CONF)
snmp.register_opts(CONF)
swift.register_opts(CONF)
diff --git a/ironic/conf/conductor.py b/ironic/conf/conductor.py
index 2161b9434..653e30f56 100644
--- a/ironic/conf/conductor.py
+++ b/ironic/conf/conductor.py
@@ -97,41 +97,6 @@ opts = [
cfg.IntOpt('node_locked_retry_interval',
default=1,
help=_('Seconds to sleep between node lock attempts.')),
- cfg.BoolOpt('send_sensor_data',
- default=False,
- help=_('Enable sending sensor data message via the '
- 'notification bus')),
- cfg.IntOpt('send_sensor_data_interval',
- default=600,
- min=1,
- help=_('Seconds between conductor sending sensor data message '
- 'to ceilometer via the notification bus.')),
- cfg.IntOpt('send_sensor_data_workers',
- default=4, min=1,
- help=_('The maximum number of workers that can be started '
- 'simultaneously for send data from sensors periodic '
- 'task.')),
- cfg.IntOpt('send_sensor_data_wait_timeout',
- default=300,
- help=_('The time in seconds to wait for send sensors data '
- 'periodic task to be finished before allowing periodic '
- 'call to happen again. Should be less than '
- 'send_sensor_data_interval value.')),
- cfg.ListOpt('send_sensor_data_types',
- default=['ALL'],
- help=_('List of comma separated meter types which need to be'
- ' sent to Ceilometer. The default value, "ALL", is a '
- 'special value meaning send all the sensor data.')),
- cfg.BoolOpt('send_sensor_data_for_undeployed_nodes',
- default=False,
- help=_('The default for sensor data collection is to only '
- 'collect data for machines that are deployed, however '
- 'operators may desire to know if there are failures '
- 'in hardware that is not presently in use. '
- 'When set to true, the conductor will collect sensor '
- 'information from all nodes when sensor data '
- 'collection is enabled via the send_sensor_data '
- 'setting.')),
cfg.IntOpt('sync_local_state_interval',
default=180,
help=_('When conductors join or leave the cluster, existing '
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index c7aff69cc..5b40c1f31 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -426,8 +426,9 @@ webserver_opts = [
'Defaults to True.')),
cfg.IntOpt('webserver_connection_timeout',
default=60,
- help=_('Connection timeout when accessing remote web servers '
- 'with images.')),
+           help=_('Connection timeout when accessing or interacting with '
+                  'remote web servers hosting images or other '
+                  'artifacts.')),
]
diff --git a/ironic/conf/fake.py b/ironic/conf/fake.py
new file mode 100644
index 000000000..8f6d75ee3
--- /dev/null
+++ b/ironic/conf/fake.py
@@ -0,0 +1,85 @@
+#
+# Copyright 2022 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.StrOpt('power_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'power driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('boot_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'boot driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('deploy_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'deploy driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('vendor_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'vendor driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('management_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'management driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('inspect_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'inspect driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('raid_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'raid driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('bios_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'bios driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('storage_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'storage driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('rescue_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'rescue driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='fake')
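
For illustration, a standalone sketch (not part of the change) of how the two-value syntax described in these help strings maps onto a triangular random delay; it mirrors the parse_sleep_range()/sleep() helpers added to ironic/drivers/modules/fake.py later in this patch:

    import random
    import time

    def demo_fake_delay(sleep_range='1,10'):
        # '1,10' -> delay between 1 and 10 seconds, weighted towards 1;
        # a single value such as '5' -> constant 5-second delay.
        parts = sleep_range.split(',')
        earliest, latest = int(parts[0]), int(parts[-1])
        if earliest == latest:
            delay = earliest
        else:
            delay = random.triangular(earliest, latest, earliest)
        time.sleep(delay)
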
diff --git a/ironic/conf/inventory.py b/ironic/conf/inventory.py
index 52f31bf60..47e88c6f9 100644
--- a/ironic/conf/inventory.py
+++ b/ironic/conf/inventory.py
@@ -17,16 +17,17 @@ from ironic.common.i18n import _
opts = [
cfg.StrOpt('data_backend',
- help=_('The storage backend for storing introspection data.'),
- choices=[('none', _('introspection data will not be stored')),
- ('database', _('introspection data stored in an SQL '
- 'database')),
- ('swift', _('introspection data stored in Swift'))],
+ help=_('The storage backend for storing inspection data.'),
+ choices=[
+ ('none', _('do not store inspection data')),
+ ('database', _('store in the service database')),
+ ('swift', _('store in the Object Storage (swift)')),
+ ],
default='database'),
cfg.StrOpt('swift_data_container',
default='introspection_data_container',
- help=_('The Swift introspection data container to store '
- 'the inventory data.')),
+               help=_('The Swift container prefix used to store inspection '
+                      'data (inventory and plugin data, stored separately).')),
]
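
A configuration sketch combining the two options above (both values are the defaults):

    [inventory]
    data_backend = database
    swift_data_container = introspection_data_container
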
diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py
index 846949893..a7ebcfb30 100644
--- a/ironic/conf/opts.py
+++ b/ironic/conf/opts.py
@@ -43,6 +43,7 @@ _opts = [
('nova', ironic.conf.nova.list_opts()),
('pxe', ironic.conf.pxe.opts),
('redfish', ironic.conf.redfish.opts),
+ ('sensor_data', ironic.conf.sensor_data.opts),
('service_catalog', ironic.conf.service_catalog.list_opts()),
('snmp', ironic.conf.snmp.opts),
('swift', ironic.conf.swift.list_opts()),
diff --git a/ironic/conf/sensor_data.py b/ironic/conf/sensor_data.py
new file mode 100644
index 000000000..8527113a6
--- /dev/null
+++ b/ironic/conf/sensor_data.py
@@ -0,0 +1,89 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.BoolOpt('send_sensor_data',
+ default=False,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data',
+                help=_('Enable sending sensor data messages via the '
+                       'notification bus.')),
+ cfg.IntOpt('interval',
+ default=600,
+ min=1,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_interval',
+               help=_('Seconds between the conductor sending sensor data '
+                      'messages via the notification bus. This was '
+                      'originally intended for consumption via ceilometer, '
+                      'but the data may also be consumed via a plugin like '
+ 'ironic-prometheus-exporter or any other message bus '
+ 'data collector.')),
+ cfg.IntOpt('workers',
+ default=4, min=1,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_workers',
+               help=_('The maximum number of workers that can be started '
+                      'simultaneously by the periodic task that sends '
+                      'sensor data.')),
+ cfg.IntOpt('wait_timeout',
+ default=300,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_wait_timeout',
+               help=_('The time in seconds to wait for the send-sensor-data '
+                      'periodic task to finish before allowing the periodic '
+                      'call to happen again. Should be less than the '
+                      'interval option value.')),
+ cfg.ListOpt('data_types',
+ default=['ALL'],
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_types',
+                help=_('List of comma-separated meter types which need to be '
+ 'sent to Ceilometer. The default value, "ALL", is a '
+ 'special value meaning send all the sensor data. '
+ 'This setting only applies to baremetal sensor data '
+ 'being processed through the conductor.')),
+ cfg.BoolOpt('enable_for_undeployed_nodes',
+ default=False,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_for_undeployed_nodes',
+                help=_('By default, sensor data is collected only for '
+                       'machines that are deployed; however, operators '
+                       'may wish to know about failures in hardware that '
+                       'is not presently in use. '
+ 'When set to true, the conductor will collect sensor '
+ 'information from all nodes when sensor data '
+ 'collection is enabled via the send_sensor_data '
+ 'setting.')),
+ cfg.BoolOpt('enable_for_conductor',
+ default=True,
+                help=_('Whether to include sensor metric data for the '
+                       'Conductor process itself in the sensor data '
+                       'message payload, which allows operators to gather '
+                       'instance counts of actions and states to better '
+                       'manage the deployment.')),
+ cfg.BoolOpt('enable_for_nodes',
+ default=True,
+                help=_('Whether to transmit any sensor data for any nodes '
+                       'under this conductor\'s management. This option '
+                       'supersedes the '
+                       '``send_sensor_data_for_undeployed_nodes`` setting.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='sensor_data')
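
Taken together, the new group supersedes the deprecated [conductor] send_sensor_data* options; a configuration sketch with every value at its default:

    [sensor_data]
    send_sensor_data = False
    interval = 600
    workers = 4
    wait_timeout = 300
    data_types = ALL
    enable_for_undeployed_nodes = False
    enable_for_conductor = True
    enable_for_nodes = True
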
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index d5f4a9d65..93a211fc3 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -1834,6 +1834,9 @@ class Connection(api.Connection):
max_to_migrate = max_count or total_to_migrate
for model in sql_models:
+ use_node_id = False
+ if (not hasattr(model, 'id') and hasattr(model, 'node_id')):
+ use_node_id = True
version = mapping[model.__name__][0]
num_migrated = 0
with _session_for_write() as session:
@@ -1847,13 +1850,27 @@ class Connection(api.Connection):
# max_to_migrate objects.
ids = []
for obj in query.slice(0, max_to_migrate):
- ids.append(obj['id'])
- num_migrated = (
- session.query(model).
- filter(sql.and_(model.id.in_(ids),
- model.version != version)).
- update({model.version: version},
- synchronize_session=False))
+ if not use_node_id:
+ ids.append(obj['id'])
+ else:
+                        # BIOSSettings, NodeTrait and NodeTag do not have
+                        # an id column; fall back to node_id, which they
+                        # all have.
+ ids.append(obj['node_id'])
+ if not use_node_id:
+ num_migrated = (
+ session.query(model).
+ filter(sql.and_(model.id.in_(ids),
+ model.version != version)).
+ update({model.version: version},
+ synchronize_session=False))
+ else:
+ num_migrated = (
+ session.query(model).
+ filter(sql.and_(model.node_id.in_(ids),
+ model.version != version)).
+ update({model.version: version},
+ synchronize_session=False))
else:
num_migrated = (
session.query(model).
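
The duplicated update branches keep behavior identical for both column layouts; a hypothetical refactor (not part of this patch) could pick the filter column once up front:

    import sqlalchemy as sql

    def _bump_versions(session, model, ids, version):
        # BIOSSettings, NodeTrait and NodeTag have no 'id' column,
        # so key on 'node_id' instead (hypothetical helper).
        id_col = model.id if hasattr(model, 'id') else model.node_id
        return (session.query(model)
                .filter(sql.and_(id_col.in_(ids), model.version != version))
                .update({model.version: version}, synchronize_session=False))
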
diff --git a/ironic/drivers/modules/ansible/playbooks/library/stream_url.py b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
index 0da3cc4dd..786b65013 100644
--- a/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
+++ b/ironic/drivers/modules/ansible/playbooks/library/stream_url.py
@@ -31,7 +31,8 @@ class StreamingDownloader(object):
else:
self.hasher = None
self.chunksize = chunksize
- resp = requests.get(url, stream=True, verify=verify, cert=certs)
+ resp = requests.get(url, stream=True, verify=verify, cert=certs,
+ timeout=30)
if resp.status_code != 200:
raise Exception('Invalid response code: %s' % resp.status_code)
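
Worth noting: per requests semantics, timeout=30 bounds the connection attempt and the wait between received chunks of the streamed response, not the total download time, so large images are not cut off.
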
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index 13f91e9cd..fd83f9f08 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -1093,6 +1093,11 @@ def _cache_and_convert_image(task, instance_info, image_info=None):
_, image_path = cache_instance_image(task.context, task.node,
force_raw=force_raw)
if force_raw or image_info is None:
+ if image_info is None:
+ initial_format = instance_info.get('image_disk_format')
+ else:
+ initial_format = image_info.get('disk_format')
+
if force_raw:
instance_info['image_disk_format'] = 'raw'
else:
@@ -1108,21 +1113,29 @@ def _cache_and_convert_image(task, instance_info, image_info=None):
# sha256.
if image_info is None:
os_hash_algo = instance_info.get('image_os_hash_algo')
+ hash_value = instance_info.get('image_os_hash_value')
+ old_checksum = instance_info.get('image_checksum')
else:
os_hash_algo = image_info.get('os_hash_algo')
+ hash_value = image_info.get('os_hash_value')
+ old_checksum = image_info.get('checksum')
+
+ if initial_format != instance_info['image_disk_format']:
+ if not os_hash_algo or os_hash_algo == 'md5':
+ LOG.debug("Checksum algorithm for image %(image)s for node "
+ "%(node)s is set to '%(algo)s', changing to sha256",
+ {'algo': os_hash_algo, 'node': task.node.uuid,
+ 'image': image_path})
+ os_hash_algo = 'sha256'
+
+ LOG.debug('Recalculating checksum for image %(image)s for node '
+ '%(node)s due to image conversion',
+ {'image': image_path, 'node': task.node.uuid})
+ instance_info['image_checksum'] = None
+ hash_value = compute_image_checksum(image_path, os_hash_algo)
+ else:
+ instance_info['image_checksum'] = old_checksum
- if not os_hash_algo or os_hash_algo == 'md5':
- LOG.debug("Checksum algorithm for image %(image)s for node "
- "%(node)s is set to '%(algo)s', changing to 'sha256'",
- {'algo': os_hash_algo, 'node': task.node.uuid,
- 'image': image_path})
- os_hash_algo = 'sha256'
-
- LOG.debug('Recalculating checksum for image %(image)s for node '
- '%(node)s due to image conversion',
- {'image': image_path, 'node': task.node.uuid})
- instance_info['image_checksum'] = None
- hash_value = compute_image_checksum(image_path, os_hash_algo)
instance_info['image_os_hash_algo'] = os_hash_algo
instance_info['image_os_hash_value'] = hash_value
else:
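
The net effect of the rework above: the checksum is recalculated only when caching actually converted the disk format, and md5 (or an unset algorithm) is upgraded to sha256 before recalculation. A self-contained sketch of that decision, with hypothetical helper names:

    def choose_hash_algo(os_hash_algo):
        # md5 and 'not set' are both upgraded to sha256 before recalculating
        if not os_hash_algo or os_hash_algo == 'md5':
            return 'sha256'
        return os_hash_algo

    def checksum_is_stale(initial_format, final_format):
        # only a real format conversion invalidates the original checksum
        return initial_format != final_format
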
diff --git a/ironic/drivers/modules/fake.py b/ironic/drivers/modules/fake.py
index dffd9065d..0a26efb4c 100644
--- a/ironic/drivers/modules/fake.py
+++ b/ironic/drivers/modules/fake.py
@@ -24,6 +24,9 @@ functionality between a power interface and a deploy interface, when both rely
on separate vendor_passthru methods.
"""
+import random
+import time
+
from oslo_log import log
from ironic.common import boot_devices
@@ -32,6 +35,7 @@ from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import indicator_states
from ironic.common import states
+from ironic.conf import CONF
from ironic.drivers import base
from ironic import objects
@@ -39,6 +43,34 @@ from ironic import objects
LOG = log.getLogger(__name__)
+def parse_sleep_range(sleep_range):
+ if not sleep_range:
+ return 0, 0
+
+ sleep_split = sleep_range.split(',')
+ if len(sleep_split) == 1:
+ a = sleep_split[0]
+ b = sleep_split[0]
+ else:
+ a = sleep_split[0]
+ b = sleep_split[1]
+ return int(a), int(b)
+
+
+def sleep(sleep_range):
+ earliest, latest = parse_sleep_range(sleep_range)
+ if earliest == 0 and latest == 0:
+ # no sleep
+ return
+ if earliest == latest:
+ # constant sleep
+ sleep = earliest
+ else:
+ # triangular random sleep, weighted towards the earliest
+ sleep = random.triangular(earliest, latest, earliest)
+ time.sleep(sleep)
+
+
class FakePower(base.PowerInterface):
"""Example implementation of a simple power interface."""
@@ -49,12 +81,15 @@ class FakePower(base.PowerInterface):
pass
def get_power_state(self, task):
+ sleep(CONF.fake.power_delay)
return task.node.power_state
def reboot(self, task, timeout=None):
+ sleep(CONF.fake.power_delay)
pass
def set_power_state(self, task, power_state, timeout=None):
+ sleep(CONF.fake.power_delay)
if power_state not in [states.POWER_ON, states.POWER_OFF,
states.SOFT_REBOOT, states.SOFT_POWER_OFF]:
raise exception.InvalidParameterValue(
@@ -81,15 +116,19 @@ class FakeBoot(base.BootInterface):
pass
def prepare_ramdisk(self, task, ramdisk_params, mode='deploy'):
+ sleep(CONF.fake.boot_delay)
pass
def clean_up_ramdisk(self, task, mode='deploy'):
+ sleep(CONF.fake.boot_delay)
pass
def prepare_instance(self, task):
+ sleep(CONF.fake.boot_delay)
pass
def clean_up_instance(self, task):
+ sleep(CONF.fake.boot_delay)
pass
@@ -108,18 +147,23 @@ class FakeDeploy(base.DeployInterface):
@base.deploy_step(priority=100)
def deploy(self, task):
+ sleep(CONF.fake.deploy_delay)
return None
def tear_down(self, task):
+ sleep(CONF.fake.deploy_delay)
return states.DELETED
def prepare(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
def clean_up(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
def take_over(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
@@ -140,6 +184,7 @@ class FakeVendorA(base.VendorInterface):
@base.passthru(['POST'],
description=_("Test if the value of bar is baz"))
def first_method(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'baz' else False
@@ -161,16 +206,19 @@ class FakeVendorB(base.VendorInterface):
@base.passthru(['POST'],
description=_("Test if the value of bar is kazoo"))
def second_method(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'kazoo' else False
@base.passthru(['POST'], async_call=False,
description=_("Test if the value of bar is meow"))
def third_method_sync(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'meow' else False
@base.passthru(['POST'], require_exclusive_lock=False,
description=_("Test if the value of bar is woof"))
def fourth_method_shared_lock(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'woof' else False
@@ -211,17 +259,21 @@ class FakeManagement(base.ManagementInterface):
return [boot_devices.PXE]
def set_boot_device(self, task, device, persistent=False):
+ sleep(CONF.fake.management_delay)
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
def get_boot_device(self, task):
+ sleep(CONF.fake.management_delay)
return {'boot_device': boot_devices.PXE, 'persistent': False}
def get_sensors_data(self, task):
+ sleep(CONF.fake.management_delay)
return {}
def get_supported_indicators(self, task, component=None):
+ sleep(CONF.fake.management_delay)
indicators = {
components.CHASSIS: {
'led-0': {
@@ -248,6 +300,7 @@ class FakeManagement(base.ManagementInterface):
if not component or component == c}
def get_indicator_state(self, task, component, indicator):
+ sleep(CONF.fake.management_delay)
indicators = self.get_supported_indicators(task)
if component not in indicators:
raise exception.InvalidParameterValue(_(
@@ -271,6 +324,7 @@ class FakeInspect(base.InspectInterface):
pass
def inspect_hardware(self, task):
+ sleep(CONF.fake.inspect_delay)
return states.MANAGEABLE
@@ -282,9 +336,11 @@ class FakeRAID(base.RAIDInterface):
def create_configuration(self, task, create_root_volume=True,
create_nonroot_volumes=True):
+ sleep(CONF.fake.raid_delay)
pass
def delete_configuration(self, task):
+ sleep(CONF.fake.raid_delay)
pass
@@ -302,6 +358,7 @@ class FakeBIOS(base.BIOSInterface):
'to contain a dictionary with name/value pairs'),
'required': True}})
def apply_configuration(self, task, settings):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of apply_configuration in fake interface
# is just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -328,6 +385,7 @@ class FakeBIOS(base.BIOSInterface):
@base.clean_step(priority=0)
def factory_reset(self, task):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of factory_reset in fake interface is
# just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -340,6 +398,7 @@ class FakeBIOS(base.BIOSInterface):
@base.clean_step(priority=0)
def cache_bios_settings(self, task):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of cache_bios_settings in fake interface
# is just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -357,9 +416,11 @@ class FakeStorage(base.StorageInterface):
return {}
def attach_volumes(self, task):
+ sleep(CONF.fake.storage_delay)
pass
def detach_volumes(self, task):
+ sleep(CONF.fake.storage_delay)
pass
def should_write_image(self, task):
@@ -376,7 +437,9 @@ class FakeRescue(base.RescueInterface):
pass
def rescue(self, task):
+ sleep(CONF.fake.rescue_delay)
return states.RESCUE
def unrescue(self, task):
+ sleep(CONF.fake.rescue_delay)
return states.ACTIVE
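
A usage sketch: the delays can be tuned per interface at runtime, for example from a test, via the standard oslo.config override call:

    from ironic.conf import CONF

    # 0-5 second triangular delay weighted towards 0 for fake power calls
    CONF.set_override('power_delay', '0,5', group='fake')
    # constant 2-second delay for fake deploy calls
    CONF.set_override('deploy_delay', '2', group='fake')
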
diff --git a/ironic/drivers/modules/ilo/boot.py b/ironic/drivers/modules/ilo/boot.py
index e29852981..fe2cef02a 100644
--- a/ironic/drivers/modules/ilo/boot.py
+++ b/ironic/drivers/modules/ilo/boot.py
@@ -604,6 +604,12 @@ class IloPXEBoot(pxe.PXEBoot):
else:
# Volume boot in BIOS boot mode is handled using
# PXE boot interface
+ boot_option = deploy_utils.get_boot_option(task.node)
+ if boot_option == "kickstart":
+ if task.node.provision_state in (states.DEPLOYING,
+ states.RESCUING,
+ states.CLEANING):
+ prepare_node_for_deploy(task)
super(IloPXEBoot, self).prepare_instance(task)
@METRICS.timer('IloPXEBoot.clean_up_instance')
@@ -696,6 +702,12 @@ class IloiPXEBoot(ipxe.iPXEBoot):
else:
# Volume boot in BIOS boot mode is handled using
# PXE boot interface
+ boot_option = deploy_utils.get_boot_option(task.node)
+ if boot_option == "kickstart":
+ if task.node.provision_state in (states.DEPLOYING,
+ states.RESCUING,
+ states.CLEANING):
+ prepare_node_for_deploy(task)
super(IloiPXEBoot, self).prepare_instance(task)
@METRICS.timer('IloiPXEBoot.clean_up_instance')
diff --git a/ironic/drivers/modules/inspect_utils.py b/ironic/drivers/modules/inspect_utils.py
index 0089302c1..f87fcc82b 100644
--- a/ironic/drivers/modules/inspect_utils.py
+++ b/ironic/drivers/modules/inspect_utils.py
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
_OBJECT_NAME_PREFIX = 'inspector_data'
-def create_ports_if_not_exist(task, macs):
+def create_ports_if_not_exist(task, macs=None):
"""Create ironic ports from MAC addresses data dict.
Creates ironic ports from MAC addresses data returned with inspection or
@@ -36,8 +36,17 @@ def create_ports_if_not_exist(task, macs):
pair.
:param task: A TaskManager instance.
- :param macs: A sequence of MAC addresses.
+ :param macs: A sequence of MAC addresses. If ``None``, fetched from
+ the task's management interface.
"""
+ if macs is None:
+ macs = task.driver.management.get_mac_addresses(task)
+ if not macs:
+ LOG.warning("Not attempting to create any port as no NICs "
+ "were discovered in 'enabled' state for node %s",
+ task.node.uuid)
+ return
+
node = task.node
for mac in macs:
if not netutils.is_valid_mac(mac):
@@ -59,101 +68,88 @@ def create_ports_if_not_exist(task, macs):
def clean_up_swift_entries(task):
- """Delete swift entries containing introspection data.
+ """Delete swift entries containing inspection data.
Delete swift entries related to the node in task.node containing
- introspection data. The entries are
+ inspection data. The entries are
``inspector_data-<task.node.uuid>-inventory`` for hardware inventory and
- similar for ``-plugin`` containing the rest of the introspection data.
+ similar for ``-plugin`` containing the rest of the inspection data.
:param task: A TaskManager instance.
"""
if CONF.inventory.data_backend != 'swift':
return
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, task.node.uuid)
container = CONF.inventory.swift_data_container
- inventory_obj_name = swift_object_name + '-inventory'
- plugin_obj_name = swift_object_name + '-plugin'
+ inventory_obj_name = f'{_OBJECT_NAME_PREFIX}-{task.node.uuid}-inventory'
+ plugin_obj_name = f'{_OBJECT_NAME_PREFIX}-{task.node.uuid}-plugin'
try:
swift_api.delete_object(inventory_obj_name, container)
except swiftclient.exceptions.ClientException as e:
- if e.http_status == 404:
- # 404 -> entry did not exist - acceptable.
- pass
- else:
- LOG.error("Object %(obj)s related to node %(node)s "
- "failed to be deleted with expection: %(e)s",
+ if e.http_status != 404:
+ LOG.error("Object %(obj)s in container %(cont)s with inventory "
+ "for node %(node)s failed to be deleted: %(e)s",
{'obj': inventory_obj_name, 'node': task.node.uuid,
- 'e': e})
+ 'e': e, 'cont': container})
raise exception.SwiftObjectStillExists(obj=inventory_obj_name,
node=task.node.uuid)
try:
swift_api.delete_object(plugin_obj_name, container)
except swiftclient.exceptions.ClientException as e:
- if e.http_status == 404:
- # 404 -> entry did not exist - acceptable.
- pass
- else:
- LOG.error("Object %(obj)s related to node %(node)s "
- "failed to be deleted with exception: %(e)s",
+ if e.http_status != 404:
+ LOG.error("Object %(obj)s in container %(cont)s with plugin data "
+ "for node %(node)s failed to be deleted: %(e)s",
{'obj': plugin_obj_name, 'node': task.node.uuid,
- 'e': e})
+ 'e': e, 'cont': container})
raise exception.SwiftObjectStillExists(obj=plugin_obj_name,
node=task.node.uuid)
-def store_introspection_data(node, introspection_data, context):
- """Store introspection data.
+def store_inspection_data(node, inventory, plugin_data, context):
+ """Store inspection data.
- Store the introspection data for a node. Either to database
- or swift as configured.
+ Store the inspection data for a node. The storage is either the database
+ or the Object Storage API (swift/radosgw) as configured.
- :param node: the Ironic node that the introspection data is about
- :param introspection_data: the data to store
+ :param node: the Ironic node that the inspection data is about
+ :param inventory: the inventory to store
+ :param plugin_data: the plugin data (if any) to store
:param context: an admin context
"""
# If store_data == 'none', do not store the data
store_data = CONF.inventory.data_backend
if store_data == 'none':
- LOG.debug('Introspection data storage is disabled, the data will '
- 'not be saved for node %(node)s', {'node': node.uuid})
+ LOG.debug('Inspection data storage is disabled, the data will '
+ 'not be saved for node %s', node.uuid)
return
- inventory_data = introspection_data.pop("inventory")
- plugin_data = introspection_data
if store_data == 'database':
node_inventory.NodeInventory(
context,
node_id=node.id,
- inventory_data=inventory_data,
+ inventory_data=inventory,
plugin_data=plugin_data).create()
- LOG.info('Introspection data was stored in database for node '
- '%(node)s', {'node': node.uuid})
+ LOG.info('Inspection data was stored in database for node %s',
+ node.uuid)
if store_data == 'swift':
- swift_object_name = _store_introspection_data_in_swift(
+ swift_object_name = _store_inspection_data_in_swift(
node_uuid=node.uuid,
- inventory_data=inventory_data,
+ inventory_data=inventory,
plugin_data=plugin_data)
- LOG.info('Introspection data was stored for node %(node)s in Swift'
- ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
+ LOG.info('Inspection data was stored in Swift for node %(node)s: '
+ 'objects %(obj_name)s-inventory and %(obj_name)s-plugin',
{'node': node.uuid, 'obj_name': swift_object_name})
-def _node_inventory_convert(node_inventory):
- inventory_data = node_inventory['inventory_data']
- plugin_data = node_inventory['plugin_data']
- return {"inventory": inventory_data, "plugin_data": plugin_data}
-
-
-def get_introspection_data(node, context):
- """Get introspection data.
+def get_inspection_data(node, context):
+ """Get inspection data.
- Retrieve the introspection data for a node. Either from database
- or swift as configured.
+ Retrieve the inspection data for a node either from database
+ or the Object Storage API (swift/radosgw) as configured.
- :param node_id: the Ironic node that the required data is about
+ :param node: the Ironic node that the required data is about
:param context: an admin context
:returns: dictionary with ``inventory`` and ``plugin_data`` fields
+ :raises: NodeInventoryNotFound if no inventory has been saved
"""
store_data = CONF.inventory.data_backend
if store_data == 'none':
@@ -161,58 +157,57 @@ def get_introspection_data(node, context):
if store_data == 'database':
node_inventory = objects.NodeInventory.get_by_node_id(
context, node.id)
- return _node_inventory_convert(node_inventory)
+ return {"inventory": node_inventory.inventory_data,
+ "plugin_data": node_inventory.plugin_data}
if store_data == 'swift':
try:
- node_inventory = _get_introspection_data_from_swift(node.uuid)
+ return _get_inspection_data_from_swift(node.uuid)
except exception.SwiftObjectNotFoundError:
raise exception.NodeInventoryNotFound(node=node.uuid)
- return node_inventory
-def _store_introspection_data_in_swift(node_uuid, inventory_data, plugin_data):
- """Uploads introspection data to Swift.
+def _store_inspection_data_in_swift(node_uuid, inventory_data, plugin_data):
+ """Uploads inspection data to Swift.
     :param node_uuid: UUID of the Ironic node that the data came from
     :param inventory_data: the hardware inventory to store
     :param plugin_data: the plugin data to store
     :returns: name of the Swift object that the data is stored in
"""
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
+ swift_object_name = f'{_OBJECT_NAME_PREFIX}-{node_uuid}'
container = CONF.inventory.swift_data_container
- swift_api.create_object_from_data(swift_object_name + '-inventory',
+ swift_api.create_object_from_data(f'{swift_object_name}-inventory',
inventory_data,
container)
- swift_api.create_object_from_data(swift_object_name + '-plugin',
+ swift_api.create_object_from_data(f'{swift_object_name}-plugin',
plugin_data,
container)
return swift_object_name
-def _get_introspection_data_from_swift(node_uuid):
- """Get introspection data from Swift.
+def _get_inspection_data_from_swift(node_uuid):
+ """Get inspection data from Swift.
:param node_uuid: UUID of the Ironic node that the data came from
:returns: dictionary with ``inventory`` and ``plugin_data`` fields
"""
swift_api = swift.SwiftAPI()
- swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
container = CONF.inventory.swift_data_container
- inv_obj = swift_object_name + '-inventory'
- plug_obj = swift_object_name + '-plugin'
+ inv_obj = f'{_OBJECT_NAME_PREFIX}-{node_uuid}-inventory'
+ plug_obj = f'{_OBJECT_NAME_PREFIX}-{node_uuid}-plugin'
try:
inventory_data = swift_api.get_object(inv_obj, container)
except exception.SwiftOperationError:
- LOG.error("Failed to retrieve object %(obj)s from swift",
- {'obj': inv_obj})
+ LOG.error("Failed to retrieve object %(obj)s from container %(cont)s",
+ {'obj': inv_obj, 'cont': container})
raise exception.SwiftObjectNotFoundError(obj=inv_obj,
container=container,
operation='get')
try:
plugin_data = swift_api.get_object(plug_obj, container)
except exception.SwiftOperationError:
- LOG.error("Failed to retrieve object %(obj)s from swift",
- {'obj': plug_obj})
+ LOG.error("Failed to retrieve object %(obj)s from container %(cont)s",
+ {'obj': plug_obj, 'cont': container})
raise exception.SwiftObjectNotFoundError(obj=plug_obj,
container=container,
operation='get')
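
For reference, a sketch of the Swift object layout these helpers read and write (the UUID is illustrative):

    container = 'introspection_data_container'  # CONF.inventory.swift_data_container
    node_uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'  # example UUID
    inventory_obj = f'inspector_data-{node_uuid}-inventory'
    plugin_obj = f'inspector_data-{node_uuid}-plugin'
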
diff --git a/ironic/drivers/modules/inspector/__init__.py b/ironic/drivers/modules/inspector/__init__.py
new file mode 100644
index 000000000..bb2dd43c8
--- /dev/null
+++ b/ironic/drivers/modules/inspector/__init__.py
@@ -0,0 +1,15 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic.drivers.modules.inspector.interface import Inspector
+
+__all__ = ['Inspector']
diff --git a/ironic/drivers/modules/inspector/client.py b/ironic/drivers/modules/inspector/client.py
new file mode 100644
index 000000000..7e996492e
--- /dev/null
+++ b/ironic/drivers/modules/inspector/client.py
@@ -0,0 +1,57 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Client helper for ironic-inspector."""
+
+from keystoneauth1 import exceptions as ks_exception
+import openstack
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import keystone
+from ironic.conf import CONF
+
+
+_INSPECTOR_SESSION = None
+
+
+def _get_inspector_session(**kwargs):
+ global _INSPECTOR_SESSION
+ if not _INSPECTOR_SESSION:
+ if CONF.auth_strategy != 'keystone':
+ # NOTE(dtantsur): using set_default instead of set_override because
+ # the native keystoneauth option must have priority.
+ CONF.set_default('auth_type', 'none', group='inspector')
+ service_auth = keystone.get_auth('inspector')
+ _INSPECTOR_SESSION = keystone.get_session('inspector',
+ auth=service_auth,
+ **kwargs)
+ return _INSPECTOR_SESSION
+
+
+def get_client(context):
+ """Helper to get inspector client instance."""
+ session = _get_inspector_session()
+ # NOTE(dtantsur): openstacksdk expects config option groups to match
+ # service name, but we use just "inspector".
+ conf = dict(CONF)
+ conf['ironic-inspector'] = conf.pop('inspector')
+ # TODO(pas-ha) investigate possibility of passing user context here,
+ # similar to what neutron/glance-related code does
+ try:
+ return openstack.connection.Connection(
+ session=session,
+ oslo_conf=conf).baremetal_introspection
+ except ks_exception.DiscoveryFailure as exc:
+ raise exception.ConfigInvalid(
+ _("Could not contact ironic-inspector for version discovery: %s")
+ % exc)
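
A usage sketch matching how interface.py consumes the relocated helper later in this patch:

    from ironic.drivers.modules.inspector import client

    def abort_introspection(task):
        # mirrors Inspector.abort() in interface.py
        client.get_client(task.context).abort_introspection(task.node.uuid)
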
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector/interface.py
index dbf171714..e72077003 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector/interface.py
@@ -15,18 +15,13 @@ Modules required to work with ironic_inspector:
https://pypi.org/project/ironic-inspector
"""
-import ipaddress
-import shlex
from urllib import parse as urlparse
import eventlet
-from keystoneauth1 import exceptions as ks_exception
-import openstack
from oslo_log import log as logging
from ironic.common import exception
from ironic.common.i18n import _
-from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import periodics
@@ -36,61 +31,22 @@ from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import inspect_utils
+from ironic.drivers.modules.inspector import client
LOG = logging.getLogger(__name__)
-_INSPECTOR_SESSION = None
# Internal field to mark whether ironic or inspector manages boot for the node
_IRONIC_MANAGES_BOOT = 'inspector_manage_boot'
-def _get_inspector_session(**kwargs):
- global _INSPECTOR_SESSION
- if not _INSPECTOR_SESSION:
- if CONF.auth_strategy != 'keystone':
- # NOTE(dtantsur): using set_default instead of set_override because
- # the native keystoneauth option must have priority.
- CONF.set_default('auth_type', 'none', group='inspector')
- service_auth = keystone.get_auth('inspector')
- _INSPECTOR_SESSION = keystone.get_session('inspector',
- auth=service_auth,
- **kwargs)
- return _INSPECTOR_SESSION
-
-
-def _get_client(context):
- """Helper to get inspector client instance."""
- session = _get_inspector_session()
- # NOTE(dtantsur): openstacksdk expects config option groups to match
- # service name, but we use just "inspector".
- conf = dict(CONF)
- conf['ironic-inspector'] = conf.pop('inspector')
- # TODO(pas-ha) investigate possibility of passing user context here,
- # similar to what neutron/glance-related code does
- try:
- return openstack.connection.Connection(
- session=session,
- oslo_conf=conf).baremetal_introspection
- except ks_exception.DiscoveryFailure as exc:
- raise exception.ConfigInvalid(
- _("Could not contact ironic-inspector for version discovery: %s")
- % exc)
-
-
def _get_callback_endpoint(client):
root = CONF.inspector.callback_endpoint_override or client.get_endpoint()
if root == 'mdns':
return root
parts = urlparse.urlsplit(root)
- is_loopback = False
- try:
- # ip_address requires a unicode string on Python 2
- is_loopback = ipaddress.ip_address(parts.hostname).is_loopback
- except ValueError: # host name
- is_loopback = (parts.hostname == 'localhost')
- if is_loopback:
+ if utils.is_loopback(parts.hostname):
raise exception.InvalidParameterValue(
_('Loopback address %s cannot be used as an introspection '
'callback URL') % parts.hostname)
@@ -181,26 +137,14 @@ def _ironic_manages_boot(task, raise_exc=False):
return True
-def _parse_kernel_params():
- """Parse kernel params from the configuration."""
- result = {}
- for s in shlex.split(CONF.inspector.extra_kernel_params):
- try:
- key, value = s.split('=', 1)
- except ValueError:
- result[s] = None
- else:
- result[key] = value
- return result
-
-
def _start_managed_inspection(task):
"""Start inspection managed by ironic."""
try:
- client = _get_client(task.context)
- endpoint = _get_callback_endpoint(client)
- params = dict(_parse_kernel_params(),
- **{'ipa-inspection-callback-url': endpoint})
+ cli = client.get_client(task.context)
+ endpoint = _get_callback_endpoint(cli)
+ params = dict(
+ utils.parse_kernel_params(CONF.inspector.extra_kernel_params),
+ **{'ipa-inspection-callback-url': endpoint})
if utils.fast_track_enabled(task.node):
params['ipa-api-url'] = deploy_utils.get_ironic_api_url()
@@ -208,7 +152,7 @@ def _start_managed_inspection(task):
with cond_utils.power_state_for_network_configuration(task):
task.driver.network.add_inspection_network(task)
task.driver.boot.prepare_ramdisk(task, ramdisk_params=params)
- client.start_introspection(task.node.uuid, manage_boot=False)
+ cli.start_introspection(task.node.uuid, manage_boot=False)
cond_utils.node_power_action(task, states.POWER_ON)
except Exception as exc:
LOG.exception('Unable to start managed inspection for node %(uuid)s: '
@@ -235,7 +179,7 @@ class Inspector(base.InspectInterface):
:param task: a task from TaskManager.
:raises: UnsupportedDriverExtension
"""
- _parse_kernel_params()
+ utils.parse_kernel_params(CONF.inspector.extra_kernel_params)
if CONF.inspector.require_managed_boot:
_ironic_manages_boot(task, raise_exc=True)
@@ -250,16 +194,7 @@ class Inspector(base.InspectInterface):
:raises: HardwareInspectionFailure on failure
"""
try:
- enabled_macs = task.driver.management.get_mac_addresses(task)
- if enabled_macs:
- inspect_utils.create_ports_if_not_exist(task, enabled_macs)
- else:
- LOG.warning("Not attempting to create any port as no NICs "
- "were discovered in 'enabled' state for node "
- "%(node)s: %(mac_data)s",
- {'mac_data': enabled_macs,
- 'node': task.node.uuid})
-
+ inspect_utils.create_ports_if_not_exist(task)
except exception.UnsupportedDriverExtension:
LOG.debug('Pre-creating ports prior to inspection not supported'
' on node %s.', task.node.uuid)
@@ -295,7 +230,7 @@ class Inspector(base.InspectInterface):
node_uuid = task.node.uuid
LOG.debug('Aborting inspection for node %(uuid)s using '
'ironic-inspector', {'uuid': node_uuid})
- _get_client(task.context).abort_introspection(node_uuid)
+ client.get_client(task.context).abort_introspection(node_uuid)
@periodics.node_periodic(
purpose='checking hardware inspection status',
@@ -310,7 +245,7 @@ class Inspector(base.InspectInterface):
def _start_inspection(node_uuid, context):
"""Call to inspector to start inspection."""
try:
- _get_client(context).start_introspection(node_uuid)
+ client.get_client(context).start_introspection(node_uuid)
except Exception as exc:
LOG.error('Error contacting ironic-inspector for inspection of node '
'%(node)s: %(cls)s: %(err)s',
@@ -339,7 +274,7 @@ def _check_status(task):
task.node.uuid)
try:
- inspector_client = _get_client(task.context)
+ inspector_client = client.get_client(task.context)
status = inspector_client.get_introspection(node.uuid)
except Exception:
# NOTE(dtantsur): get_status should not normally raise
@@ -364,15 +299,17 @@ def _check_status(task):
_inspection_error_handler(task, error)
elif status.is_finished:
_clean_up(task)
- store_data = CONF.inventory.data_backend
- if store_data == 'none':
- LOG.debug('Introspection data storage is disabled, the data will '
- 'not be saved for node %(node)s', {'node': node.uuid})
+ if CONF.inventory.data_backend == 'none':
+ LOG.debug('Inspection data storage is disabled, the data will '
+ 'not be saved for node %s', node.uuid)
return
introspection_data = inspector_client.get_introspection_data(
node.uuid, processed=True)
- inspect_utils.store_introspection_data(node, introspection_data,
- task.context)
+ # TODO(dtantsur): having no inventory is an abnormal state, handle it.
+ inventory = introspection_data.pop('inventory', {})
+ inspect_utils.store_inspection_data(node, inventory,
+ introspection_data,
+ task.context)
def _clean_up(task):
diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py
index fe93acefd..a55f5b9fd 100644
--- a/ironic/drivers/modules/pxe.py
+++ b/ironic/drivers/modules/pxe.py
@@ -27,6 +27,7 @@ from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base
+from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import pxe_base
LOG = logging.getLogger(__name__)
@@ -114,21 +115,11 @@ class PXEAnacondaDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
def reboot_to_instance(self, task):
node = task.node
try:
- # anaconda deploy will install the bootloader and the node is ready
- # to boot from disk.
-
- deploy_utils.try_set_boot_device(task, boot_devices.DISK)
- except Exception as e:
- msg = (_("Failed to change the boot device to %(boot_dev)s "
- "when deploying node %(node)s. Error: %(error)s") %
- {'boot_dev': boot_devices.DISK, 'node': node.uuid,
- 'error': e})
- agent_base.log_and_raise_deployment_error(task, msg)
-
- try:
task.process_event('resume')
self.clean_up(task)
manager_utils.node_power_action(task, states.POWER_OFF)
+ deploy_utils.try_set_boot_device(task, boot_devices.DISK)
+ boot_mode_utils.configure_secure_boot_if_needed(task)
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.node_power_action(task, states.POWER_ON)
diff --git a/ironic/drivers/modules/pxe_base.py b/ironic/drivers/modules/pxe_base.py
index daa90ba8d..f3ac49890 100644
--- a/ironic/drivers/modules/pxe_base.py
+++ b/ironic/drivers/modules/pxe_base.py
@@ -231,11 +231,12 @@ class PXEBaseMixin(object):
:returns: None
"""
boot_mode_utils.sync_boot_mode(task)
- boot_mode_utils.configure_secure_boot_if_needed(task)
-
node = task.node
- boot_option = deploy_utils.get_boot_option(node)
boot_device = None
+ boot_option = deploy_utils.get_boot_option(node)
+ if boot_option != "kickstart":
+ boot_mode_utils.configure_secure_boot_if_needed(task)
+
instance_image_info = {}
if boot_option == "ramdisk" or boot_option == "kickstart":
instance_image_info = pxe_utils.get_instance_image_info(
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index d544d5687..435d24b78 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -392,9 +392,9 @@ def _get_client(snmp_info):
snmp_info.get("read_community"),
snmp_info.get("write_community"),
snmp_info.get("user"),
- snmp_info.get("auth_proto"),
+ snmp_info.get("auth_protocol"),
snmp_info.get("auth_key"),
- snmp_info.get("priv_proto"),
+ snmp_info.get("priv_protocol"),
snmp_info.get("priv_key"),
snmp_info.get("context_engine_id"),
snmp_info.get("context_name"))
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index d56652b1e..87a029057 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -44,8 +44,8 @@ from ironic.common import indicator_states
from ironic.common import policy
from ironic.common import states
from ironic.conductor import rpcapi
+from ironic.conf import CONF
from ironic.drivers.modules import inspect_utils
-from ironic.drivers.modules import inspector
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic import tests as tests_root
@@ -54,7 +54,6 @@ from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as test_api_utils
from ironic.tests.unit.objects import utils as obj_utils
-CONF = inspector.CONF
with open(
os.path.join(
@@ -844,6 +843,64 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertIn('retired_reason', data['nodes'][0])
self.assertIn('network_data', data['nodes'][0])
+ def test_detail_snmpv3(self):
+ driver_info = {
+ 'snmp_version': 3,
+ 'snmp_user': 'test-user',
+ 'snmp_auth_protocol': 'sha',
+ 'snmp_auth_key': 'test-auth-key',
+ 'snmp_priv_protocol': 'aes',
+ 'snmp_priv_key': 'test-priv-key'
+ }
+ sanitized_driver_info = driver_info.copy()
+ sanitized_driver_info['snmp_auth_key'] = '******'
+ sanitized_driver_info['snmp_priv_key'] = '******'
+
+ node = obj_utils.create_test_node(self.context,
+ chassis_id=self.chassis.id,
+ driver_info=driver_info)
+ data = self.get_json(
+ '/nodes/detail',
+ headers={api_base.Version.string: str(api_v1.max_version())})
+ self.assertEqual(node.uuid, data['nodes'][0]["uuid"])
+ self.assertIn('name', data['nodes'][0])
+ self.assertIn('driver', data['nodes'][0])
+ self.assertIn('driver_info', data['nodes'][0])
+ self.assertEqual(sanitized_driver_info,
+ data['nodes'][0]['driver_info'])
+ self.assertIn('extra', data['nodes'][0])
+ self.assertIn('properties', data['nodes'][0])
+ self.assertIn('chassis_uuid', data['nodes'][0])
+ self.assertIn('reservation', data['nodes'][0])
+ self.assertIn('maintenance', data['nodes'][0])
+ self.assertIn('console_enabled', data['nodes'][0])
+ self.assertIn('target_power_state', data['nodes'][0])
+ self.assertIn('target_provision_state', data['nodes'][0])
+ self.assertIn('provision_updated_at', data['nodes'][0])
+ self.assertIn('inspection_finished_at', data['nodes'][0])
+ self.assertIn('inspection_started_at', data['nodes'][0])
+ self.assertIn('raid_config', data['nodes'][0])
+ self.assertIn('target_raid_config', data['nodes'][0])
+ self.assertIn('network_interface', data['nodes'][0])
+ self.assertIn('resource_class', data['nodes'][0])
+ for field in api_utils.V31_FIELDS:
+ self.assertIn(field, data['nodes'][0])
+ self.assertIn('storage_interface', data['nodes'][0])
+ self.assertIn('traits', data['nodes'][0])
+ self.assertIn('conductor_group', data['nodes'][0])
+ self.assertIn('automated_clean', data['nodes'][0])
+ self.assertIn('protected', data['nodes'][0])
+ self.assertIn('protected_reason', data['nodes'][0])
+ self.assertIn('owner', data['nodes'][0])
+ self.assertIn('lessee', data['nodes'][0])
+ # never expose the chassis_id
+ self.assertNotIn('chassis_id', data['nodes'][0])
+ self.assertNotIn('allocation_id', data['nodes'][0])
+ self.assertIn('allocation_uuid', data['nodes'][0])
+ self.assertIn('retired', data['nodes'][0])
+ self.assertIn('retired_reason', data['nodes'][0])
+ self.assertIn('network_data', data['nodes'][0])
+
def test_detail_instance_uuid(self):
instance_uuid = '6eccd391-961c-4da5-b3c5-e2fa5cfbbd9d'
node = obj_utils.create_test_node(
@@ -7928,20 +7985,15 @@ class TestNodeInventory(test_api_base.BaseApiTest):
self.node = obj_utils.create_test_node(
self.context,
provision_state=states.AVAILABLE, name='node-81')
- self.node.save()
- self.node.obj_reset_changes()
-
- def _add_inventory(self):
- self.inventory = objects.NodeInventory(
- node_id=self.node.id, inventory_data=self.fake_inventory_data,
- plugin_data=self.fake_plugin_data)
- self.inventory.create()
+ CONF.set_override('data_backend', 'database', group='inventory')
- def test_get_old_version(self):
+ @mock.patch.object(inspect_utils, 'get_inspection_data', autospec=True)
+ def test_get_old_version(self, mock_get):
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
headers={api_base.Version.string: "1.80"},
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, ret.status_code)
+ mock_get.assert_not_called()
def test_get_inventory_no_inventory(self):
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
@@ -7950,33 +8002,10 @@ class TestNodeInventory(test_api_base.BaseApiTest):
self.assertEqual(http_client.NOT_FOUND, ret.status_code)
def test_get_inventory(self):
- self._add_inventory()
- CONF.set_override('data_backend', 'database',
- group='inventory')
- ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
- headers={api_base.Version.string: self.version})
- self.assertEqual({'inventory': self.fake_inventory_data,
- 'plugin_data': self.fake_plugin_data}, ret)
-
- @mock.patch.object(inspect_utils, 'get_introspection_data',
- autospec=True)
- def test_get_inventory_exception(self, mock_get_data):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- mock_get_data.side_effect = [
- exception.NodeInventoryNotFound]
- ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
- headers={api_base.Version.string: self.version},
- expect_errors=True)
- self.assertEqual(http_client.NOT_FOUND, ret.status_int)
-
- @mock.patch.object(inspect_utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_inventory_swift(self, mock_get_data):
- CONF.set_override('data_backend', 'swift',
- group='inventory')
- mock_get_data.return_value = {"inventory": self.fake_inventory_data,
- "plugin_data": self.fake_plugin_data}
+ obj_utils.create_test_inventory(
+ self.context, self.node,
+ inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
headers={api_base.Version.string: self.version})
self.assertEqual({'inventory': self.fake_inventory_data,
diff --git a/ironic/tests/unit/common/test_kickstart_utils.py b/ironic/tests/unit/common/test_kickstart_utils.py
index 0dd1ac572..db6123b9d 100644
--- a/ironic/tests/unit/common/test_kickstart_utils.py
+++ b/ironic/tests/unit/common/test_kickstart_utils.py
@@ -129,4 +129,5 @@ echo $CONTENT | /usr/bin/base64 --decode > {file_path}\n\
task.node.instance_info = i_info
task.node.save()
self.assertEqual(expected, ks_utils.prepare_config_drive(task))
- mock_get.assert_called_with('http://server/fake-configdrive-url')
+ mock_get.assert_called_with('http://server/fake-configdrive-url',
+ timeout=60)
diff --git a/ironic/tests/unit/common/test_molds.py b/ironic/tests/unit/common/test_molds.py
index 810dd61bc..2323c2fa8 100644
--- a/ironic/tests/unit/common/test_molds.py
+++ b/ironic/tests/unit/common/test_molds.py
@@ -46,7 +46,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
molds.save_configuration(task, url, data)
mock_put.assert_called_once_with(url, '{\n "key": "value"\n}',
- headers={'X-Auth-Token': 'token'})
+ headers={'X-Auth-Token': 'token'},
+ timeout=60)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@mock.patch.object(requests, 'put', autospec=True)
@@ -77,7 +78,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
mock_put.assert_called_once_with(
url, '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_noauth(self, mock_put):
@@ -91,7 +93,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
molds.save_configuration(task, url, data)
mock_put.assert_called_once_with(
url, '{\n "key": "value"\n}',
- headers=None)
+ headers=None,
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_http_error(self, mock_put):
@@ -112,7 +115,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
{'key': 'value'})
mock_put.assert_called_once_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'put', autospec=True)
def test_save_configuration_connection_error(self, mock_put):
@@ -132,7 +136,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
task, 'https://example.com/file2', {'key': 'value'})
mock_put.assert_called_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_put.call_count, 3)
@mock.patch.object(requests, 'put', autospec=True)
@@ -155,7 +160,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
{'key': 'value'})
mock_put.assert_called_with(
'https://example.com/file2', '{\n "key": "value"\n}',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_put.call_count, 2)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@@ -176,7 +182,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
result = molds.get_configuration(task, url)
mock_get.assert_called_once_with(
- url, headers={'X-Auth-Token': 'token'})
+ url, headers={'X-Auth-Token': 'token'},
+ timeout=60)
self.assertJsonEqual({'key': 'value'}, result)
@mock.patch.object(swift, 'get_swift_session', autospec=True)
@@ -210,7 +217,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
result = molds.get_configuration(task, url)
mock_get.assert_called_once_with(
- url, headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ url, headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertJsonEqual({"key": "value"}, result)
@mock.patch.object(requests, 'get', autospec=True)
@@ -228,7 +236,7 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid) as task:
result = molds.get_configuration(task, url)
- mock_get.assert_called_once_with(url, headers=None)
+ mock_get.assert_called_once_with(url, headers=None, timeout=60)
self.assertJsonEqual({"key": "value"}, result)
@mock.patch.object(requests, 'get', autospec=True)
@@ -249,7 +257,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
'https://example.com/file2')
mock_get.assert_called_once_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
@mock.patch.object(requests, 'get', autospec=True)
def test_get_configuration_connection_error(self, mock_get):
@@ -269,7 +278,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
task, 'https://example.com/file2')
mock_get.assert_called_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_get.call_count, 3)
@mock.patch.object(requests, 'get', autospec=True)
@@ -291,7 +301,8 @@ class ConfigurationMoldTestCase(db_base.DbTestCase):
'https://example.com/file2')
mock_get.assert_called_with(
'https://example.com/file2',
- headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='})
+ headers={'Authorization': 'Basic dXNlcjpwYXNzd29yZA=='},
+ timeout=60)
self.assertEqual(mock_get.call_count, 2)
@mock.patch.object(requests, 'get', autospec=True)
diff --git a/ironic/tests/unit/common/test_release_mappings.py b/ironic/tests/unit/common/test_release_mappings.py
index dad536257..e6f4479b9 100644
--- a/ironic/tests/unit/common/test_release_mappings.py
+++ b/ironic/tests/unit/common/test_release_mappings.py
@@ -44,7 +44,7 @@ NUMERIC_RELEASES = sorted(
map(versionutils.convert_version_to_tuple,
set(release_mappings.RELEASE_MAPPING)
# Update the exceptions whenever needed
- - {'master', 'zed', 'yoga'}),
+ - {'master', '2023.1', 'antelope', 'zed', 'yoga'}),
reverse=True)
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
index a4c3d57b6..34e805deb 100644
--- a/ironic/tests/unit/conductor/test_cleaning.py
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -1138,12 +1138,12 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
class DoNodeCleanAbortTestCase(db_base.DbTestCase):
@mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def _test__do_node_clean_abort(self, step_name, tear_mock):
+ def _test_do_node_clean_abort(self, clean_step, tear_mock):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
- provision_state=states.CLEANFAIL,
+ provision_state=states.CLEANWAIT,
target_provision_state=states.AVAILABLE,
- clean_step={'step': 'foo', 'abortable': True},
+ clean_step=clean_step,
driver_internal_info={
'agent_url': 'some url',
'agent_secret_token': 'token',
@@ -1153,11 +1153,11 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
'skip_current_clean_step': True})
with task_manager.acquire(self.context, node.uuid) as task:
- cleaning.do_node_clean_abort(task, step_name=step_name)
+ cleaning.do_node_clean_abort(task)
self.assertIsNotNone(task.node.last_error)
tear_mock.assert_called_once_with(task.driver.deploy, task)
- if step_name:
- self.assertIn(step_name, task.node.last_error)
+ if clean_step:
+ self.assertIn(clean_step['step'], task.node.last_error)
# assert node's clean_step and metadata was cleaned up
self.assertEqual({}, task.node.clean_step)
self.assertNotIn('clean_step_index',
@@ -1173,11 +1173,12 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
self.assertNotIn('agent_secret_token',
task.node.driver_internal_info)
- def test__do_node_clean_abort(self):
- self._test__do_node_clean_abort(None)
+ def test_do_node_clean_abort_early(self):
+ self._test_do_node_clean_abort(None)
- def test__do_node_clean_abort_with_step_name(self):
- self._test__do_node_clean_abort('foo')
+ def test_do_node_clean_abort_with_step(self):
+ self._test_do_node_clean_abort({'step': 'foo', 'interface': 'deploy',
+ 'abortable': True})
@mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
def test__do_node_clean_abort_tear_down_fail(self, tear_mock):
diff --git a/ironic/tests/unit/conductor/test_inspection.py b/ironic/tests/unit/conductor/test_inspection.py
new file mode 100644
index 000000000..c64b883e4
--- /dev/null
+++ b/ironic/tests/unit/conductor/test_inspection.py
@@ -0,0 +1,118 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import inspection
+from ironic.conductor import task_manager
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as obj_utils
+
+
+@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
+class TestInspectHardware(db_base.DbTestCase):
+
+ def test_inspect_hardware_ok(self, mock_inspect):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ driver_internal_info={'agent_url': 'url',
+ 'agent_secret_token': 'token'})
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.MANAGEABLE
+ inspection.inspect_hardware(task)
+ node.refresh()
+ self.assertEqual(states.MANAGEABLE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+ task.node.refresh()
+ self.assertNotIn('agent_url', task.node.driver_internal_info)
+ self.assertNotIn('agent_secret_token', task.node.driver_internal_info)
+
+ def test_inspect_hardware_return_inspecting(self, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.INSPECTING
+ self.assertRaises(exception.HardwareInspectionFailure,
+ inspection.inspect_hardware, task)
+
+ node.refresh()
+ self.assertIn('driver returned unexpected state', node.last_error)
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+
+ def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = states.INSPECTWAIT
+ inspection.inspect_hardware(task)
+ node.refresh()
+ self.assertEqual(states.INSPECTWAIT, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertIsNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+
+ @mock.patch.object(inspection, 'LOG', autospec=True)
+ def test_inspect_hardware_return_other_state(self, log_mock, mock_inspect):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING)
+ task = task_manager.TaskManager(self.context, node.uuid)
+ mock_inspect.return_value = None
+ self.assertRaises(exception.HardwareInspectionFailure,
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertIsNotNone(node.last_error)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
+ self.assertTrue(log_mock.error.called)
+
+ def test_inspect_hardware_raises_error(self, mock_inspect):
+ mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
+ state = states.MANAGEABLE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ target_provision_state=state)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaisesRegex(exception.HardwareInspectionFailure, '^test$',
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertEqual('test', node.last_error)
+ self.assertTrue(mock_inspect.called)
+
+ def test_inspect_hardware_unexpected_error(self, mock_inspect):
+ mock_inspect.side_effect = RuntimeError('x')
+ state = states.MANAGEABLE
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.INSPECTING,
+ target_provision_state=state)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ self.assertRaisesRegex(exception.HardwareInspectionFailure,
+ 'Unexpected exception of type RuntimeError: x',
+ inspection.inspect_hardware, task)
+ node.refresh()
+ self.assertEqual(states.INSPECTFAIL, node.provision_state)
+ self.assertEqual(states.MANAGEABLE, node.target_provision_state)
+ self.assertEqual('Unexpected exception of type RuntimeError: x',
+ node.last_error)
+ self.assertTrue(mock_inspect.called)
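The file above pins down the state contract of the extracted ironic.conductor.inspection helper: a driver returning states.MANAGEABLE finishes inspection, states.INSPECTWAIT parks the node for asynchronous processing, and any other return value or exception lands the node in INSPECTFAIL with last_error set. A minimal sketch of that dispatch, inferred from the assertions above rather than the actual module:

    # Sketch only: the contract exercised by TestInspectHardware, not the
    # real ironic.conductor.inspection implementation.
    from ironic.common import exception, states

    def handle_inspection_result(task, new_state):
        if new_state == states.MANAGEABLE:
            task.process_event('done')    # INSPECTING -> MANAGEABLE
        elif new_state == states.INSPECTWAIT:
            task.process_event('wait')    # INSPECTING -> INSPECTWAIT
        else:
            error = 'driver returned unexpected state %s' % new_state
            task.process_event('fail')    # INSPECTING -> INSPECTFAIL
            raise exception.HardwareInspectionFailure(error=error)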
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index ded80718d..7278486a5 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -26,6 +26,7 @@ from unittest import mock
import eventlet
from futurist import waiters
+from ironic_lib import metrics as ironic_metrics
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import uuidutils
@@ -45,6 +46,7 @@ from ironic.common import nova
from ironic.common import states
from ironic.conductor import cleaning
from ironic.conductor import deployments
+from ironic.conductor import inspection
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import steps as conductor_steps
@@ -2734,7 +2736,8 @@ class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
# Node will be moved to tgt_prov_state after cleaning, not tested here
self.assertEqual(states.CLEANFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertIsNone(node.last_error)
+ self.assertEqual('By request, the clean operation was aborted',
+ node.last_error)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean_abort, mock.ANY)
@@ -4273,7 +4276,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_all(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['All'], group='conductor')
+ CONF.set_override('data_types', ['All'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4282,7 +4286,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_part(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['t1'], group='conductor')
+ CONF.set_override('data_types', ['t1'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4291,7 +4296,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_non(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['t3'], group='conductor')
+ CONF.set_override('data_types', ['t3'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4305,7 +4311,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
for i in range(5):
nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', '', None))
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = False
@@ -4334,7 +4341,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
self.service._shutdown = True
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
self.service._sensors_nodes_task(self.context, nodes)
acquire_mock.return_value.__enter__.assert_not_called()
@@ -4343,7 +4351,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
self._start_service()
@@ -4361,7 +4370,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = True
@@ -4384,10 +4393,10 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn):
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
# NOTE(galyna): do not wait for threads to be finished in unittests
- CONF.set_override('send_sensor_data_wait_timeout', 0,
- group='conductor')
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake', None)]
self.service._send_sensor_data(self.context)
@@ -4395,6 +4404,37 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service._sensors_nodes_task,
self.context, mock.ANY)
+ @mock.patch.object(queue, 'Queue', autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_sensors_conductor',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_spawn_worker',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
+ def test___send_sensor_data_disabled(
+ self, get_nodeinfo_list_mock,
+ _mapped_to_this_conductor_mock,
+ mock_spawn, mock_sensors_conductor,
+ mock_queue):
+ self._start_service()
+
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('enable_for_nodes', False,
+ group='sensor_data')
+ CONF.set_override('enable_for_conductor', False,
+ group='sensor_data')
+ # NOTE(galyna): do not wait for threads to be finished in unittests
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
+ _mapped_to_this_conductor_mock.return_value = True
+ get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake', None)]
+ self.service._send_sensor_data(self.context)
+ mock_sensors_conductor.assert_not_called()
+ # NOTE(TheJulia): Can't assert on the spawn worker mock since it
+ # records other, unrelated calls; the queue mock works well here.
+ mock_queue.assert_not_called()
+
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
@@ -4407,24 +4447,66 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.reset_mock()
number_of_workers = 8
- CONF.set_override('send_sensor_data', True, group='conductor')
- CONF.set_override('send_sensor_data_workers', number_of_workers,
- group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('workers', number_of_workers,
+ group='sensor_data')
# NOTE(galyna): do not wait for threads to be finished in unittests
- CONF.set_override('send_sensor_data_wait_timeout', 0,
- group='conductor')
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake',
None)] * 20
self.service._send_sensor_data(self.context)
- self.assertEqual(number_of_workers,
+ self.assertEqual(number_of_workers + 1,
mock_spawn.call_count)
# TODO(TheJulia): At some point, we should add a test to validate that
# a modified filter to return all nodes actually works, although
# the way the sensor tests are written, the list is all mocked.
+ @mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
+ def test___send_sensor_data_one_worker(
+ self, get_nodeinfo_list_mock, _mapped_to_this_conductor_mock,
+ mock_spawn):
+ self._start_service()
+ mock_spawn.reset_mock()
+
+ number_of_workers = 1
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('workers', number_of_workers,
+ group='sensor_data')
+ # NOTE(galyna): do not wait for threads to be finished in unittests
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
+
+ _mapped_to_this_conductor_mock.return_value = True
+ get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake',
+ None)] * 20
+ self.service._send_sensor_data(self.context)
+ self.assertEqual(number_of_workers,
+ mock_spawn.call_count)
+
+ @mock.patch.object(messaging.Notifier, 'info', autospec=True)
+ @mock.patch.object(ironic_metrics.MetricLogger,
+ 'get_metrics_data', autospec=True)
+ def test__sensors_conductor(self, mock_get_metrics, mock_notifier):
+ metric = {'metric': 'data'}
+ mock_get_metrics.return_value = metric
+ self._start_service()
+ self.service._sensors_conductor(self.context)
+ self.assertEqual(mock_notifier.call_count, 1)
+ self.assertEqual('ironic.metrics', mock_notifier.call_args.args[2])
+ metrics_dict = mock_notifier.call_args.args[3]
+ self.assertEqual(metrics_dict.get('event_type'),
+ 'ironic.metrics.update')
+ self.assertDictEqual(metrics_dict.get('payload'),
+ metric)
+
@mgr_utils.mock_record_keepalive
class BootDeviceTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -6380,77 +6462,6 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_ok(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(
- self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- driver_internal_info={'agent_url': 'url',
- 'agent_secret_token': 'token'})
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.MANAGEABLE
- manager._do_inspect_hardware(task)
- node.refresh()
- self.assertEqual(states.MANAGEABLE, node.provision_state)
- self.assertEqual(states.NOSTATE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
- task.node.refresh()
- self.assertNotIn('agent_url', task.node.driver_internal_info)
- self.assertNotIn('agent_secret_token', task.node.driver_internal_info)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_inspecting(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.INSPECTING
- self.assertRaises(exception.HardwareInspectionFailure,
- manager._do_inspect_hardware, task)
-
- node.refresh()
- self.assertIn('driver returned unexpected state', node.last_error)
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = states.INSPECTWAIT
- manager._do_inspect_hardware(task)
- node.refresh()
- self.assertEqual(states.INSPECTWAIT, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
-
- @mock.patch.object(manager, 'LOG', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_return_other_state(self, mock_inspect, log_mock):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING)
- task = task_manager.TaskManager(self.context, node.uuid)
- mock_inspect.return_value = None
- self.assertRaises(exception.HardwareInspectionFailure,
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertIsNotNone(node.last_error)
- mock_inspect.assert_called_once_with(task.driver.inspect, task)
- self.assertTrue(log_mock.error.called)
-
def test__check_inspect_wait_timeouts(self):
self._start_service()
CONF.set_override('inspect_wait_timeout', 1, group='conductor')
@@ -6528,46 +6539,6 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_inspect_hardware_power_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_raises_error(self, mock_inspect):
- self._start_service()
- mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
- state = states.MANAGEABLE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- target_provision_state=state)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaisesRegex(exception.HardwareInspectionFailure, '^test$',
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertEqual('test', node.last_error)
- self.assertTrue(mock_inspect.called)
-
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
- autospec=True)
- def test_inspect_hardware_unexpected_error(self, mock_inspect):
- self._start_service()
- mock_inspect.side_effect = RuntimeError('x')
- state = states.MANAGEABLE
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- provision_state=states.INSPECTING,
- target_provision_state=state)
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.assertRaisesRegex(exception.HardwareInspectionFailure,
- 'Unexpected exception of type RuntimeError: x',
- manager._do_inspect_hardware, task)
- node.refresh()
- self.assertEqual(states.INSPECTFAIL, node.provision_state)
- self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- self.assertEqual('Unexpected exception of type RuntimeError: x',
- node.last_error)
- self.assertTrue(mock_inspect.called)
-
@mock.patch.object(conductor_utils, 'node_history_record',
mock.Mock(spec=conductor_utils.node_history_record))
@@ -8166,7 +8137,7 @@ class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
- @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch.object(inspection, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_not_support(self, mock_acquire,
@@ -8187,7 +8158,7 @@ class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
exc.exc_info[0])
self.assertTrue(mock_log.error.called)
- @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch.object(inspection, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_return_failed(self, mock_acquire,
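Most of the churn in SensorsTestCase traces back to one configuration move: the send_sensor_data* options left the [conductor] section for a dedicated [sensor_data] group and lost their prefix, and the group gained enable_for_conductor and enable_for_nodes toggles along with a conductor-level metrics task (the extra spawn in the eight-worker assertion; the new _sensors_conductor test shows it emitting ironic.metrics.update notifications). The equivalent overrides, mirroring the tests above with illustrative values:

    # Renamed options, as exercised by the tests above.
    CONF.set_override('send_sensor_data', True, group='sensor_data')
    CONF.set_override('data_types', ['All'], group='sensor_data')  # was send_sensor_data_types
    CONF.set_override('workers', 8, group='sensor_data')           # was send_sensor_data_workers
    CONF.set_override('wait_timeout', 0, group='sensor_data')      # was send_sensor_data_wait_timeout
    CONF.set_override('enable_for_conductor', True, group='sensor_data')  # new
    CONF.set_override('enable_for_nodes', True, group='sensor_data')      # new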
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index a424e5132..52fc72436 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -196,7 +196,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
- power_state=states.POWER_OFF)
+ power_state=states.POWER_OFF,
+ last_error='failed before')
task = task_manager.TaskManager(self.context, node.uuid)
get_power_mock.return_value = states.POWER_OFF
@@ -209,6 +210,27 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
+ @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
+ def test_node_power_action_keep_last_error(self, get_power_mock):
+ """Test node_power_action to keep last_error for failed states."""
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ power_state=states.POWER_OFF,
+ provision_state=states.CLEANFAIL,
+ last_error='failed before')
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ get_power_mock.return_value = states.POWER_OFF
+
+ conductor_utils.node_power_action(task, states.POWER_ON)
+
+ node.refresh()
+ get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(states.POWER_ON, node['power_state'])
+ self.assertIsNone(node['target_power_state'])
+ self.assertEqual('failed before', node['last_error'])
+
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
@@ -282,6 +304,31 @@ class NodePowerActionTestCase(db_base.DbTestCase):
node['driver_internal_info'])
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
+ def test_node_power_action_power_off_already(self, get_power_mock):
+ """Test node_power_action to turn node power off, but already off."""
+ dii = {'agent_secret_token': 'token',
+ 'agent_cached_deploy_steps': ['steps']}
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ power_state=states.POWER_ON,
+ driver_internal_info=dii)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ get_power_mock.return_value = states.POWER_OFF
+
+ conductor_utils.node_power_action(task, states.POWER_OFF)
+
+ node.refresh()
+ get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(states.POWER_OFF, node['power_state'])
+ self.assertIsNone(node['target_power_state'])
+ self.assertIsNone(node['last_error'])
+ self.assertNotIn('agent_secret_token', node['driver_internal_info'])
+ self.assertNotIn('agent_cached_deploy_steps',
+ node['driver_internal_info'])
+
+ @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_power_off_pregenerated_token(self,
get_power_mock):
dii = {'agent_secret_token': 'token',
@@ -1150,6 +1197,9 @@ class ErrorHandlersTestCase(db_base.DbTestCase):
self.node.set_driver_internal_info('skip_current_clean_step', True)
self.node.set_driver_internal_info('clean_step_index', 0)
self.node.set_driver_internal_info('agent_url', 'url')
+ self.node.set_driver_internal_info('agent_secret_token', 'foo')
+ self.node.set_driver_internal_info('agent_secret_token_pregenerated',
+ False)
msg = 'error bar'
last_error = "last error"
@@ -1162,6 +1212,9 @@ class ErrorHandlersTestCase(db_base.DbTestCase):
self.assertNotIn('cleaning_polling', self.node.driver_internal_info)
self.assertNotIn('skip_current_clean_step',
self.node.driver_internal_info)
+ self.assertNotIn('agent_secret_token', self.node.driver_internal_info)
+ self.assertNotIn('agent_secret_token_pregenerated',
+ self.node.driver_internal_info)
self.assertEqual(last_error, self.node.last_error)
self.assertTrue(self.node.maintenance)
self.assertEqual(last_error, self.node.maintenance_reason)
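Two behaviors are pinned down here: node_power_action now preserves an existing last_error when the node sits in a failed provision state (CLEANFAIL above) instead of unconditionally clearing it, and powering off purges agent bookkeeping such as agent_secret_token and agent_cached_deploy_steps even when the node already reports POWER_OFF. A sketch of the preservation guard these tests imply; the full set of preserved states is an assumption, since only CLEANFAIL is exercised:

    from ironic.common import states

    # Assumed guard: only CLEANFAIL is covered by the tests above.
    PRESERVE_LAST_ERROR_STATES = {states.CLEANFAIL}

    def maybe_clear_last_error(node):
        if node.provision_state not in PRESERVE_LAST_ERROR_STATES:
            node.last_error = None  # the usual reset on a successful power action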
diff --git a/ironic/tests/unit/db/test_api.py b/ironic/tests/unit/db/test_api.py
index 6142fdfae..2396b1253 100644
--- a/ironic/tests/unit/db/test_api.py
+++ b/ironic/tests/unit/db/test_api.py
@@ -226,6 +226,11 @@ class UpdateToLatestVersionsTestCase(base.DbTestCase):
for i in range(0, num_nodes):
node = utils.create_test_node(version=version,
uuid=uuidutils.generate_uuid())
+ # Create entries in the related tables so we force field upgrades
+ utils.create_test_node_trait(node_id=node.id, trait='foo',
+ version='0.0')
+ utils.create_test_bios_setting(node_id=node.id, version='1.0')
+
nodes.append(node.uuid)
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
@@ -238,10 +243,15 @@ class UpdateToLatestVersionsTestCase(base.DbTestCase):
return
nodes = self._create_nodes(5)
+ # Ask to migrate at most 2: 10 records need migration, 2 are migrated.
+ self.assertEqual(
+ (10, 2), self.dbapi.update_to_latest_versions(self.context, 2))
+ # Ask to migrate up to 10: the remaining 8 records are all migrated.
self.assertEqual(
- (5, 2), self.dbapi.update_to_latest_versions(self.context, 2))
+ (8, 8), self.dbapi.update_to_latest_versions(self.context, 10))
+ # Make sure it is still (0, 0) in case more migrations are added.
self.assertEqual(
- (3, 3), self.dbapi.update_to_latest_versions(self.context, 10))
+ (0, 0), self.dbapi.update_to_latest_versions(self.context, 10))
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(self.node_ver, node.version)
@@ -250,10 +260,19 @@ class UpdateToLatestVersionsTestCase(base.DbTestCase):
if self.node_version_same:
# can't test if we don't have diff versions of the node
return
-
- nodes = self._create_nodes(5)
+ vm_count = 5
+ nodes = self._create_nodes(vm_count)
+ # NOTE(TheJulia): Under current testing, 5 nodes will result in 10
+ # records implicitly needing to be migrated.
+ migrate_count = vm_count * 2
+ self.assertEqual(
+ (migrate_count, migrate_count),
+ self.dbapi.update_to_latest_versions(self.context,
+ migrate_count))
self.assertEqual(
- (5, 5), self.dbapi.update_to_latest_versions(self.context, 5))
+ (0, 0), self.dbapi.update_to_latest_versions(self.context,
+ migrate_count))
+
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(self.node_ver, node.version)
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
index 8aa6f78da..8ebaa14fa 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
@@ -1132,6 +1132,45 @@ class IloPXEBootTestCase(test_common.BaseIloTest):
self.assertIsNone(task.node.driver_internal_info.get(
'ilo_uefi_iscsi_boot'))
+ @mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
+ @mock.patch.object(deploy_utils, 'is_iscsi_boot',
+ spec_set=True, autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_instance', spec_set=True,
+ autospec=True)
+ def _test_prepare_instance_anaconda(self, pxe_prepare_instance_mock,
+ update_boot_mode_mock,
+ get_boot_mode_mock,
+ is_iscsi_boot_mock,
+ mock_get_boot_opt,
+ mock_prep_node_fr_deploy, prov_state):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = prov_state
+ mock_get_boot_opt.return_value = 'kickstart'
+ is_iscsi_boot_mock.return_value = False
+ get_boot_mode_mock.return_value = 'uefi'
+ task.driver.boot.prepare_instance(task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
+ self.assertIsNone(task.node.driver_internal_info.get(
+ 'ilo_uefi_iscsi_boot'))
+ mock_prep_node_fr_deploy.assert_called_once_with(task)
+
+ def test_prepare_instance_anaconda_deploying(self):
+ self._test_prepare_instance_anaconda(prov_state=states.DEPLOYING)
+
+ def test_prepare_instance_anaconda_rescuing(self):
+ self._test_prepare_instance_anaconda(prov_state=states.RESCUING)
+
+ def test_prepare_instance_anaconda_cleaning(self):
+ self._test_prepare_instance_anaconda(prov_state=states.CLEANING)
+
@mock.patch.object(deploy_utils, 'is_iscsi_boot',
spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
@@ -1299,6 +1338,45 @@ class IloiPXEBootTestCase(test_common.BaseIloTest):
self.assertIsNone(task.node.driver_internal_info.get(
'ilo_uefi_iscsi_boot'))
+ @mock.patch.object(ilo_boot, 'prepare_node_for_deploy', spec_set=True,
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'get_boot_option', autospec=True)
+ @mock.patch.object(deploy_utils, 'is_iscsi_boot',
+ spec_set=True, autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ipxe.iPXEBoot, 'prepare_instance', spec_set=True,
+ autospec=True)
+ def _test_prepare_instance_anaconda(self, pxe_prepare_instance_mock,
+ update_boot_mode_mock,
+ get_boot_mode_mock,
+ is_iscsi_boot_mock,
+ mock_get_boot_opt,
+ mock_prep_node_fr_deploy, prov_state):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.provision_state = prov_state
+ mock_get_boot_opt.return_value = 'kickstart'
+ is_iscsi_boot_mock.return_value = False
+ get_boot_mode_mock.return_value = 'uefi'
+ task.driver.boot.prepare_instance(task)
+ update_boot_mode_mock.assert_called_once_with(task)
+ pxe_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
+ self.assertIsNone(task.node.driver_internal_info.get(
+ 'ilo_uefi_iscsi_boot'))
+ mock_prep_node_fr_deploy.assert_called_once_with(task)
+
+ def test_prepare_instance_anaconda_deploying(self):
+ self._test_prepare_instance_anaconda(prov_state=states.DEPLOYING)
+
+ def test_prepare_instance_anaconda_rescuing(self):
+ self._test_prepare_instance_anaconda(prov_state=states.RESCUING)
+
+ def test_prepare_instance_anaconda_cleaning(self):
+ self._test_prepare_instance_anaconda(prov_state=states.CLEANING)
+
@mock.patch.object(deploy_utils, 'is_iscsi_boot',
spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
diff --git a/ironic/tests/unit/drivers/modules/inspector/__init__.py b/ironic/tests/unit/drivers/modules/inspector/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/inspector/__init__.py
diff --git a/ironic/tests/unit/drivers/modules/inspector/test_client.py b/ironic/tests/unit/drivers/modules/inspector/test_client.py
new file mode 100644
index 000000000..08f0fcd93
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/inspector/test_client.py
@@ -0,0 +1,65 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from keystoneauth1 import exceptions as ks_exception
+import openstack
+
+from ironic.common import context
+from ironic.common import exception
+from ironic.conf import CONF
+from ironic.drivers.modules.inspector import client
+from ironic.tests.unit.db import base as db_base
+
+
+@mock.patch('ironic.common.keystone.get_auth', autospec=True,
+ return_value=mock.sentinel.auth)
+@mock.patch('ironic.common.keystone.get_session', autospec=True,
+ return_value=mock.sentinel.session)
+@mock.patch.object(openstack.connection, 'Connection', autospec=True)
+class GetClientTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(GetClientTestCase, self).setUp()
+ # NOTE(pas-ha) force-reset global inspector session object
+ client._INSPECTOR_SESSION = None
+ self.context = context.RequestContext(global_request_id='global')
+
+ def test_get_client(self, mock_conn, mock_session, mock_auth):
+ client.get_client(self.context)
+ mock_conn.assert_called_once_with(
+ session=mock.sentinel.session,
+ oslo_conf=mock.ANY)
+ self.assertEqual(1, mock_auth.call_count)
+ self.assertEqual(1, mock_session.call_count)
+
+ def test_get_client_standalone(self, mock_conn, mock_session, mock_auth):
+ self.config(auth_strategy='noauth')
+ client.get_client(self.context)
+ self.assertEqual('none', CONF.inspector.auth_type)
+ mock_conn.assert_called_once_with(
+ session=mock.sentinel.session,
+ oslo_conf=mock.ANY)
+ self.assertEqual(1, mock_auth.call_count)
+ self.assertEqual(1, mock_session.call_count)
+
+ def test_get_client_connection_problem(
+ self, mock_conn, mock_session, mock_auth):
+ mock_conn.side_effect = ks_exception.DiscoveryFailure("")
+ self.assertRaises(exception.ConfigInvalid,
+ client.get_client, self.context)
+ mock_conn.assert_called_once_with(
+ session=mock.sentinel.session,
+ oslo_conf=mock.ANY)
+ self.assertEqual(1, mock_auth.call_count)
+ self.assertEqual(1, mock_session.call_count)
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/inspector/test_interface.py
index 75ccc3ebf..42bb55f2b 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/inspector/test_interface.py
@@ -13,65 +13,19 @@
from unittest import mock
import eventlet
-from keystoneauth1 import exceptions as ks_exception
-import openstack
-from ironic.common import context
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
+from ironic.conf import CONF
from ironic.drivers.modules import inspect_utils
-from ironic.drivers.modules import inspector
+from ironic.drivers.modules.inspector import client
+from ironic.drivers.modules.inspector import interface as inspector
from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
-CONF = inspector.CONF
-
-
-@mock.patch('ironic.common.keystone.get_auth', autospec=True,
- return_value=mock.sentinel.auth)
-@mock.patch('ironic.common.keystone.get_session', autospec=True,
- return_value=mock.sentinel.session)
-@mock.patch.object(openstack.connection, 'Connection', autospec=True)
-class GetClientTestCase(db_base.DbTestCase):
-
- def setUp(self):
- super(GetClientTestCase, self).setUp()
- # NOTE(pas-ha) force-reset global inspector session object
- inspector._INSPECTOR_SESSION = None
- self.context = context.RequestContext(global_request_id='global')
-
- def test__get_client(self, mock_conn, mock_session, mock_auth):
- inspector._get_client(self.context)
- mock_conn.assert_called_once_with(
- session=mock.sentinel.session,
- oslo_conf=mock.ANY)
- self.assertEqual(1, mock_auth.call_count)
- self.assertEqual(1, mock_session.call_count)
-
- def test__get_client_standalone(self, mock_conn, mock_session, mock_auth):
- self.config(auth_strategy='noauth')
- inspector._get_client(self.context)
- self.assertEqual('none', inspector.CONF.inspector.auth_type)
- mock_conn.assert_called_once_with(
- session=mock.sentinel.session,
- oslo_conf=mock.ANY)
- self.assertEqual(1, mock_auth.call_count)
- self.assertEqual(1, mock_session.call_count)
-
- def test__get_client_connection_problem(
- self, mock_conn, mock_session, mock_auth):
- mock_conn.side_effect = ks_exception.DiscoveryFailure("")
- self.assertRaises(exception.ConfigInvalid,
- inspector._get_client, self.context)
- mock_conn.assert_called_once_with(
- session=mock.sentinel.session,
- oslo_conf=mock.ANY)
- self.assertEqual(1, mock_auth.call_count)
- self.assertEqual(1, mock_session.call_count)
-
class BaseTestCase(db_base.DbTestCase):
def setUp(self):
@@ -129,7 +83,7 @@ class CommonFunctionsTestCase(BaseTestCase):
@mock.patch.object(eventlet, 'spawn_n', lambda f, *a, **kw: f(*a, **kw))
-@mock.patch('ironic.drivers.modules.inspector._get_client', autospec=True)
+@mock.patch.object(client, 'get_client', autospec=True)
class InspectHardwareTestCase(BaseTestCase):
def test_validate_ok(self, mock_client):
self.iface.validate(self.task)
@@ -369,7 +323,7 @@ class InspectHardwareTestCase(BaseTestCase):
self.task, 'power off', timeout=None)
-@mock.patch('ironic.drivers.modules.inspector._get_client', autospec=True)
+@mock.patch.object(client, 'get_client', autospec=True)
class CheckStatusTestCase(BaseTestCase):
def setUp(self):
super(CheckStatusTestCase, self).setUp()
@@ -551,22 +505,24 @@ class CheckStatusTestCase(BaseTestCase):
self.task)
self.driver.boot.clean_up_ramdisk.assert_called_once_with(self.task)
- @mock.patch.object(inspect_utils, 'store_introspection_data',
- autospec=True)
+ @mock.patch.object(inspect_utils, 'store_inspection_data', autospec=True)
def test_status_ok_store_inventory(self, mock_store_data, mock_client):
mock_get = mock_client.return_value.get_introspection
mock_get.return_value = mock.Mock(is_finished=True,
error=None,
spec=['is_finished', 'error'])
- fake_introspection_data = {
- "inventory": {"cpu": "amd"}, "disks": [{"name": "/dev/vda"}]}
+ fake_inventory = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+ fake_introspection_data = dict(fake_plugin_data,
+ inventory=fake_inventory)
mock_get_data = mock_client.return_value.get_introspection_data
mock_get_data.return_value = fake_introspection_data
inspector._check_status(self.task)
mock_get.assert_called_once_with(self.node.uuid)
mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
mock_store_data.assert_called_once_with(self.node,
- fake_introspection_data,
+ fake_inventory,
+ fake_plugin_data,
self.task.context)
def test_status_ok_store_inventory_nostore(self, mock_client):
@@ -593,7 +549,7 @@ class CheckStatusTestCase(BaseTestCase):
mock_get_data.assert_not_called()
-@mock.patch('ironic.drivers.modules.inspector._get_client', autospec=True)
+@mock.patch.object(client, 'get_client', autospec=True)
class InspectHardwareAbortTestCase(BaseTestCase):
def test_abort_ok(self, mock_client):
mock_abort = mock_client.return_value.abort_introspection
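The rename above tracks the driver refactor: ironic/drivers/modules/inspector.py became a package, with the interface logic in inspector/interface.py and session handling in inspector/client.py, where the private _get_client helper is now the public client.get_client. Call sites and mocks move accordingly; a minimal usage sketch:

    from ironic.common import context as ironic_context
    from ironic.drivers.modules.inspector import client

    ctx = ironic_context.RequestContext()
    conn = client.get_client(ctx)  # was inspector._get_client(ctx) before the split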
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index 1177e9743..7220697cb 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -1940,7 +1940,7 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
self.node.save()
self.checksum_mock = self.useFixture(fixtures.MockPatchObject(
- fileutils, 'compute_file_checksum')).mock
+ fileutils, 'compute_file_checksum', autospec=True)).mock
self.checksum_mock.return_value = 'fake-checksum'
self.cache_image_mock = self.useFixture(fixtures.MockPatchObject(
utils, 'cache_instance_image', autospec=True)).mock
@@ -2012,9 +2012,25 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
image_info=self.image_info, expect_raw=True)
self.assertIsNone(instance_info['image_checksum'])
+ self.assertEqual(instance_info['image_os_hash_algo'], 'sha512')
+ self.assertEqual(instance_info['image_os_hash_value'],
+ 'fake-checksum')
self.assertEqual(instance_info['image_disk_format'], 'raw')
- calls = [mock.call(image_path, algorithm='sha512')]
- self.checksum_mock.assert_has_calls(calls)
+ self.checksum_mock.assert_called_once_with(image_path,
+ algorithm='sha512')
+
+ def test_build_instance_info_already_raw(self):
+ cfg.CONF.set_override('force_raw_images', True)
+ self.image_info['disk_format'] = 'raw'
+ image_path, instance_info = self._test_build_instance_info(
+ image_info=self.image_info, expect_raw=True)
+
+ self.assertEqual(instance_info['image_checksum'], 'aa')
+ self.assertEqual(instance_info['image_os_hash_algo'], 'sha512')
+ self.assertEqual(instance_info['image_os_hash_value'],
+ 'fake-sha512')
+ self.assertEqual(instance_info['image_disk_format'], 'raw')
+ self.checksum_mock.assert_not_called()
def test_build_instance_info_force_raw_drops_md5(self):
cfg.CONF.set_override('force_raw_images', True)
@@ -2027,6 +2043,17 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
calls = [mock.call(image_path, algorithm='sha256')]
self.checksum_mock.assert_has_calls(calls)
+ def test_build_instance_info_already_raw_keeps_md5(self):
+ cfg.CONF.set_override('force_raw_images', True)
+ self.image_info['os_hash_algo'] = 'md5'
+ self.image_info['disk_format'] = 'raw'
+ image_path, instance_info = self._test_build_instance_info(
+ image_info=self.image_info, expect_raw=True)
+
+ self.assertEqual(instance_info['image_checksum'], 'aa')
+ self.assertEqual(instance_info['image_disk_format'], 'raw')
+ self.checksum_mock.assert_not_called()
+
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_file_image(self, validate_href_mock):
@@ -2035,7 +2062,6 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
i_info['image_source'] = 'file://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
- i_info['image_checksum'] = 'aa'
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
@@ -2052,6 +2078,7 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
self.assertEqual(expected_url, info['image_url'])
self.assertEqual('sha256', info['image_os_hash_algo'])
self.assertEqual('fake-checksum', info['image_os_hash_value'])
+ self.assertEqual('raw', info['image_disk_format'])
self.cache_image_mock.assert_called_once_with(
task.context, task.node, force_raw=True)
self.checksum_mock.assert_called_once_with(
@@ -2068,7 +2095,6 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
- i_info['image_checksum'] = 'aa'
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
@@ -2102,7 +2128,6 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
- i_info['image_checksum'] = 'aa'
i_info['image_download_source'] = 'local'
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
@@ -2138,7 +2163,6 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
- i_info['image_checksum'] = 'aa'
d_info['image_download_source'] = 'local'
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
@@ -2164,6 +2188,41 @@ class TestBuildInstanceInfoForHttpProvisioning(db_base.DbTestCase):
validate_href_mock.assert_called_once_with(
mock.ANY, expected_url, False)
+ @mock.patch.object(image_service.HttpImageService, 'validate_href',
+ autospec=True)
+ def test_build_instance_info_local_image_already_raw(self,
+ validate_href_mock):
+ cfg.CONF.set_override('image_download_source', 'local', group='agent')
+ i_info = self.node.instance_info
+ driver_internal_info = self.node.driver_internal_info
+ i_info['image_source'] = 'http://image-ref'
+ i_info['image_checksum'] = 'aa'
+ i_info['root_gb'] = 10
+ i_info['image_disk_format'] = 'raw'
+ driver_internal_info['is_whole_disk_image'] = True
+ self.node.instance_info = i_info
+ self.node.driver_internal_info = driver_internal_info
+ self.node.save()
+
+ expected_url = (
+ 'http://172.172.24.10:8080/agent_images/%s' % self.node.uuid)
+
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+
+ info = utils.build_instance_info_for_deploy(task)
+
+ self.assertEqual(expected_url, info['image_url'])
+ self.assertEqual('aa', info['image_checksum'])
+ self.assertEqual('raw', info['image_disk_format'])
+ self.assertIsNone(info['image_os_hash_algo'])
+ self.assertIsNone(info['image_os_hash_value'])
+ self.cache_image_mock.assert_called_once_with(
+ task.context, task.node, force_raw=True)
+ self.checksum_mock.assert_not_called()
+ validate_href_mock.assert_called_once_with(
+ mock.ANY, expected_url, False)
+
class TestStorageInterfaceUtils(db_base.DbTestCase):
def setUp(self):
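The new "already raw" cases assert that build_instance_info_for_deploy skips checksum recomputation when the image already has disk_format raw: no conversion happens, so the user-supplied checksum survives and compute_file_checksum is never invoked. The resulting instance_info shape, taken from the assertions above:

    # Expected fields when the image is already raw (values from the tests).
    expected = {
        'image_checksum': 'aa',            # original value is kept
        'image_disk_format': 'raw',
        'image_os_hash_algo': 'sha512',    # pre-existing hash metadata passes through
        'image_os_hash_value': 'fake-sha512',
    }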
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index 7cb451473..57980d5a2 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -23,14 +23,13 @@ from ironic.common import context as ironic_context
from ironic.common import exception
from ironic.common import swift
from ironic.conductor import task_manager
+from ironic.conf import CONF
from ironic.drivers.modules import inspect_utils as utils
-from ironic.drivers.modules import inspector
from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
sushy = importutils.try_import('sushy')
-CONF = inspector.CONF
@mock.patch('time.sleep', lambda sec: None)
@@ -104,7 +103,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -118,7 +117,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_404_exception(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -133,7 +132,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_fail_exception(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -149,7 +148,7 @@ class SwiftCleanUp(db_base.DbTestCase):
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
def test_clean_up_swift_entries_with_fail_exceptions(self, swift_api_mock):
CONF.set_override('data_backend', 'swift', group='inventory')
- container = 'introspection_data'
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -172,115 +171,93 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
super(IntrospectionDataStorageFunctionsTestCase, self).setUp()
self.node = obj_utils.create_test_node(self.context)
- def test_store_introspection_data_db(self):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- fake_introspection_data = {'inventory': self.fake_inventory_data,
- **self.fake_plugin_data}
+ def test_store_inspection_data_db(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
fake_context = ironic_context.RequestContext()
- utils.store_introspection_data(self.node, fake_introspection_data,
- fake_context)
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
stored = objects.NodeInventory.get_by_node_id(self.context,
self.node.id)
self.assertEqual(self.fake_inventory_data, stored["inventory_data"])
self.assertEqual(self.fake_plugin_data, stored["plugin_data"])
- @mock.patch.object(utils, '_store_introspection_data_in_swift',
+ @mock.patch.object(utils, '_store_inspection_data_in_swift',
autospec=True)
- def test_store_introspection_data_swift(self, mock_store_data):
+ def test_store_inspection_data_swift(self, mock_store_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
- fake_introspection_data = {
- "inventory": self.fake_inventory_data, **self.fake_plugin_data}
fake_context = ironic_context.RequestContext()
- utils.store_introspection_data(self.node, fake_introspection_data,
- fake_context)
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
mock_store_data.assert_called_once_with(
self.node.uuid, inventory_data=self.fake_inventory_data,
plugin_data=self.fake_plugin_data)
- def test_store_introspection_data_nostore(self):
+ def test_store_inspection_data_nostore(self):
CONF.set_override('data_backend', 'none', group='inventory')
- fake_introspection_data = {
- "inventory": self.fake_inventory_data, **self.fake_plugin_data}
fake_context = ironic_context.RequestContext()
- ret = utils.store_introspection_data(self.node,
- fake_introspection_data,
- fake_context)
- self.assertIsNone(ret)
-
- def test__node_inventory_convert(self):
- required_output = {"inventory": self.fake_inventory_data,
- "plugin_data": self.fake_plugin_data}
- input_given = {}
- input_given["inventory_data"] = self.fake_inventory_data
- input_given["plugin_data"] = self.fake_plugin_data
- input_given["booom"] = "boom"
- ret = utils._node_inventory_convert(input_given)
- self.assertEqual(required_output, ret)
-
- @mock.patch.object(utils, '_node_inventory_convert', autospec=True)
- @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
- def test_get_introspection_data_db(self, mock_inventory, mock_convert):
- CONF.set_override('data_backend', 'database',
- group='inventory')
- fake_introspection_data = {'inventory': self.fake_inventory_data,
- 'plugin_data': self.fake_plugin_data}
+ utils.store_inspection_data(self.node, self.fake_inventory_data,
+ self.fake_plugin_data, fake_context)
+ self.assertRaises(exception.NodeInventoryNotFound,
+ objects.NodeInventory.get_by_node_id,
+ self.context, self.node.id)
+
+ def test_get_inspection_data_db(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
+ obj_utils.create_test_inventory(
+ self.context, self.node,
+ inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
fake_context = ironic_context.RequestContext()
- mock_inventory.get_by_node_id.return_value = fake_introspection_data
- utils.get_introspection_data(self.node, fake_context)
- mock_convert.assert_called_once_with(fake_introspection_data)
-
- @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
- def test_get_introspection_data_db_exception(self, mock_inventory):
- CONF.set_override('data_backend', 'database',
- group='inventory')
+ ret = utils.get_inspection_data(self.node, fake_context)
+ fake_inspection_data = {'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}
+ self.assertEqual(ret, fake_inspection_data)
+
+ def test_get_inspection_data_db_exception(self):
+ CONF.set_override('data_backend', 'database', group='inventory')
fake_context = ironic_context.RequestContext()
- mock_inventory.get_by_node_id.side_effect = [
- exception.NodeInventoryNotFound(self.node.uuid)]
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
- @mock.patch.object(utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_introspection_data_swift(self, mock_get_data):
+ @mock.patch.object(utils, '_get_inspection_data_from_swift', autospec=True)
+ def test_get_inspection_data_swift(self, mock_get_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
fake_context = ironic_context.RequestContext()
- utils.get_introspection_data(self.node, fake_context)
- mock_get_data.assert_called_once_with(
- self.node.uuid)
+ ret = utils.get_inspection_data(self.node, fake_context)
+ mock_get_data.assert_called_once_with(self.node.uuid)
+ self.assertEqual(mock_get_data.return_value, ret)
- @mock.patch.object(utils, '_get_introspection_data_from_swift',
- autospec=True)
- def test_get_introspection_data_swift_exception(self, mock_get_data):
+ @mock.patch.object(utils, '_get_inspection_data_from_swift', autospec=True)
+ def test_get_inspection_data_swift_exception(self, mock_get_data):
CONF.set_override('data_backend', 'swift', group='inventory')
CONF.set_override(
- 'swift_data_container', 'introspection_data',
+ 'swift_data_container', 'inspection_data',
group='inventory')
fake_context = ironic_context.RequestContext()
mock_get_data.side_effect = exception.SwiftObjectNotFoundError()
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
- def test_get_introspection_data_nostore(self):
+ def test_get_inspection_data_nostore(self):
CONF.set_override('data_backend', 'none', group='inventory')
fake_context = ironic_context.RequestContext()
self.assertRaises(
- exception.NodeInventoryNotFound, utils.get_introspection_data,
+ exception.NodeInventoryNotFound, utils.get_inspection_data,
self.node, fake_context)
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__store_introspection_data_in_swift(self, swift_api_mock):
- container = 'introspection_data'
+ def test__store_inspection_data_in_swift(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
- utils._store_introspection_data_in_swift(
+ utils._store_inspection_data_in_swift(
self.node.uuid, self.fake_inventory_data, self.fake_plugin_data)
swift_obj_mock = swift_api_mock.return_value
object_name = 'inspector_data-' + str(self.node.uuid)
@@ -291,23 +268,22 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
container)])
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__get_introspection_data_from_swift(self, swift_api_mock):
- container = 'introspection_data'
+ def test__get_inspection_data_from_swift(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.get_object.side_effect = [
self.fake_inventory_data,
self.fake_plugin_data
]
- ret = utils._get_introspection_data_from_swift(self.node.uuid)
+ ret = utils._get_inspection_data_from_swift(self.node.uuid)
req_ret = {"inventory": self.fake_inventory_data,
"plugin_data": self.fake_plugin_data}
self.assertEqual(req_ret, ret)
@mock.patch.object(swift, 'SwiftAPI', autospec=True)
- def test__get_introspection_data_from_swift_exception(self,
- swift_api_mock):
- container = 'introspection_data'
+ def test__get_inspection_data_from_swift_exception(self, swift_api_mock):
+ container = 'inspection_data'
CONF.set_override('swift_data_container', container, group='inventory')
swift_obj_mock = swift_api_mock.return_value
swift_obj_mock.get_object.side_effect = [
@@ -315,5 +291,5 @@ class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
self.fake_plugin_data
]
self.assertRaises(exception.SwiftObjectNotFoundError,
- utils._get_introspection_data_from_swift,
+ utils._get_inspection_data_from_swift,
self.node.uuid)
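Beyond the introspection-to-inspection rename, the storage helpers change shape: store_inspection_data now takes the inventory and the plugin data as separate arguments rather than one combined blob, and get_inspection_data returns them under fixed keys. The call pattern the tests exercise, where node and context stand in for a Node object and a request context:

    from ironic.drivers.modules import inspect_utils

    inventory = {'cpu': 'amd'}
    plugin_data = {'disks': [{'name': '/dev/vda'}]}
    inspect_utils.store_inspection_data(node, inventory, plugin_data, context)
    data = inspect_utils.get_inspection_data(node, context)
    assert data == {'inventory': inventory, 'plugin_data': plugin_data}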
diff --git a/ironic/tests/unit/drivers/modules/test_pxe.py b/ironic/tests/unit/drivers/modules/test_pxe.py
index e7d444104..f16366470 100644
--- a/ironic/tests/unit/drivers/modules/test_pxe.py
+++ b/ironic/tests/unit/drivers/modules/test_pxe.py
@@ -550,6 +550,8 @@ class PXEBootTestCase(db_base.DbTestCase):
def test_prepare_instance_ramdisk_pxe_conf_exists(self):
self._test_prepare_instance_ramdisk(config_file_exits=False)
+ @mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
+ autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@@ -567,7 +569,7 @@ class PXEBootTestCase(db_base.DbTestCase):
self, exec_mock, write_file_mock, render_mock, api_url_mock,
boot_opt_mock, get_image_info_mock, cache_mock, dhcp_factory_mock,
create_pxe_config_mock, switch_pxe_config_mock,
- set_boot_device_mock):
+ set_boot_device_mock, mock_conf_sec_boot):
image_info = {'kernel': ['ins_kernel_id', '/path/to/kernel'],
'ramdisk': ['ins_ramdisk_id', '/path/to/ramdisk'],
'stage2': ['ins_stage2_id', '/path/to/stage2'],
@@ -611,6 +613,7 @@ class PXEBootTestCase(db_base.DbTestCase):
set_boot_device_mock.assert_called_once_with(task,
boot_devices.PXE,
persistent=True)
+ self.assertFalse(mock_conf_sec_boot.called)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@@ -786,11 +789,13 @@ class PXEAnacondaDeployTestCase(db_base.DbTestCase):
task.driver.deploy.prepare(task)
mock_prepare_instance.assert_called_once_with(mock.ANY, task)
+ @mock.patch.object(boot_mode_utils, 'configure_secure_boot_if_needed',
+ autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
def test_reboot_to_instance(self, mock_set_boot_dev, mock_image_info,
- mock_cleanup_pxe_env):
+ mock_cleanup_pxe_env, mock_conf_sec_boot):
image_info = {'kernel': ('', '/path/to/kernel'),
'ramdisk': ('', '/path/to/ramdisk'),
'stage2': ('', '/path/to/stage2'),
@@ -802,6 +807,7 @@ class PXEAnacondaDeployTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.reboot_to_instance(task)
mock_set_boot_dev.assert_called_once_with(task, boot_devices.DISK)
+ mock_conf_sec_boot.assert_called_once_with(task)
mock_cleanup_pxe_env.assert_called_once_with(task, image_info,
ipxe_enabled=False)
diff --git a/ironic/tests/unit/drivers/modules/test_snmp.py b/ironic/tests/unit/drivers/modules/test_snmp.py
index 5391d7ac5..e1b6fc1df 100644
--- a/ironic/tests/unit/drivers/modules/test_snmp.py
+++ b/ironic/tests/unit/drivers/modules/test_snmp.py
@@ -46,6 +46,41 @@ class SNMPClientTestCase(base.TestCase):
self.value = 'value'
@mock.patch.object(pysnmp, 'SnmpEngine', autospec=True)
+ def test__get_client(self, mock_snmpengine):
+ driver_info = db_utils.get_test_snmp_info(
+ snmp_address=self.address,
+ snmp_port=self.port,
+ snmp_user='test-user',
+ snmp_auth_protocol='sha',
+ snmp_auth_key='test-auth-key',
+ snmp_priv_protocol='aes',
+ snmp_priv_key='test-priv-key',
+ snmp_context_engine_id='test-engine-id',
+ snmp_context_name='test-context-name',
+ snmp_version='3')
+ node = obj_utils.get_test_node(
+ self.context,
+ driver_info=driver_info)
+ info = snmp._parse_driver_info(node)
+
+ client = snmp._get_client(info)
+
+ mock_snmpengine.assert_called_once_with()
+ self.assertEqual(self.address, client.address)
+ self.assertEqual(int(self.port), client.port)
+ self.assertEqual(snmp.SNMP_V3, client.version)
+ self.assertNotIn('read_community', client.__dict__)
+ self.assertNotIn('write_community', client.__dict__)
+ self.assertEqual('test-user', client.user)
+ self.assertEqual(pysnmp.usmHMACSHAAuthProtocol, client.auth_proto)
+ self.assertEqual('test-auth-key', client.auth_key)
+ self.assertEqual(pysnmp.usmAesCfb128Protocol, client.priv_proto)
+ self.assertEqual('test-priv-key', client.priv_key)
+ self.assertEqual('test-engine-id', client.context_engine_id)
+ self.assertEqual('test-context-name', client.context_name)
+ self.assertEqual(mock_snmpengine.return_value, client.snmp_engine)
+
+ @mock.patch.object(pysnmp, 'SnmpEngine', autospec=True)
def test___init__(self, mock_snmpengine):
client = snmp.SNMPClient(self.address, self.port, snmp.SNMP_V1)
mock_snmpengine.assert_called_once_with()
diff --git a/ironic/tests/unit/drivers/test_fake_hardware.py b/ironic/tests/unit/drivers/test_fake_hardware.py
index 70460a6a4..637f52bf9 100644
--- a/ironic/tests/unit/drivers/test_fake_hardware.py
+++ b/ironic/tests/unit/drivers/test_fake_hardware.py
@@ -17,6 +17,8 @@
"""Test class for Fake driver."""
+import time
+from unittest import mock
from ironic.common import boot_devices
from ironic.common import boot_modes
@@ -26,6 +28,7 @@ from ironic.common import indicator_states
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base as driver_base
+from ironic.drivers.modules import fake
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
@@ -164,3 +167,29 @@ class FakeHardwareTestCase(db_base.DbTestCase):
self.assertEqual({}, self.driver.inspect.get_properties())
self.driver.inspect.validate(self.task)
self.driver.inspect.inspect_hardware(self.task)
+
+ def test_parse_sleep_range(self):
+ self.assertEqual((0, 0), fake.parse_sleep_range('0'))
+ self.assertEqual((0, 0), fake.parse_sleep_range(''))
+ self.assertEqual((1, 1), fake.parse_sleep_range('1'))
+ self.assertEqual((1, 10), fake.parse_sleep_range('1,10'))
+ self.assertEqual((10, 20), fake.parse_sleep_range('10, 20'))
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_zero(self, mock_sleep):
+ fake.sleep("0")
+ mock_sleep.assert_not_called()
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_one(self, mock_sleep):
+ fake.sleep("1")
+ mock_sleep.assert_called_once_with(1)
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_range(self, mock_sleep):
+ for i in range(100):
+ fake.sleep("1,10")
+ for call in mock_sleep.call_args_list:
+ v = call[0][0]
+ self.assertGreaterEqual(v, 1)
+ self.assertLessEqual(v, 10)
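Editor's note: the behaviour pinned down by these tests can be summarised in a minimal Python sketch of the two helpers. This is an illustration of the expected semantics only, not the actual ironic/drivers/modules/fake.py code:

    import random
    import time


    def parse_sleep_range(sleep_range):
        """Parse '<min>[,<max>]' into an inclusive (min, max) tuple."""
        if not sleep_range:
            return 0, 0
        parts = sleep_range.split(',')
        lower = int(parts[0])
        upper = int(parts[1]) if len(parts) > 1 else lower
        return lower, upper


    def sleep(sleep_range):
        """Sleep for a random duration taken from the configured range."""
        lower, upper = parse_sleep_range(sleep_range)
        if upper == 0:
            return  # '0' or an empty range means no delay at all
        time.sleep(random.randint(lower, upper))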
diff --git a/ironic/tests/unit/objects/utils.py b/ironic/tests/unit/objects/utils.py
index 26c3a22e7..979ab28a2 100644
--- a/ironic/tests/unit/objects/utils.py
+++ b/ironic/tests/unit/objects/utils.py
@@ -380,3 +380,15 @@ class SchemasTestMixIn(object):
"for %s, schema key %s has invalid %s "
"field %s" % (payload, schema_key, resource,
key))
+
+
+def create_test_inventory(ctxt, node, **kw):
+ """Create and return a test node inventory object."""
+ inv = objects.NodeInventory(ctxt)
+ if not isinstance(node, str):
+ node = node.id
+ kw['node_id'] = node
+ for key, value in kw.items():
+ setattr(inv, key, value)
+ inv.create()
+ return inv
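Editor's note: a hedged usage sketch for this helper. The ``inventory_data`` and ``plugin_data`` field names are assumptions for illustration; the helper simply forwards whatever keyword arguments it receives to the NodeInventory object:

    # Accepts either a node object (its id is used) or a node id directly.
    inv = create_test_inventory(
        self.context, self.node,
        inventory_data={'memory_mb': 4096},  # assumed field name
        plugin_data={'disks': []})           # assumed field name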
diff --git a/playbooks/metal3-ci/fetch_kube_logs.yaml b/playbooks/metal3-ci/fetch_kube_logs.yaml
new file mode 100644
index 000000000..a20294178
--- /dev/null
+++ b/playbooks/metal3-ci/fetch_kube_logs.yaml
@@ -0,0 +1,32 @@
+---
+- name: Create the target directory
+ file:
+ path: "{{ logs_management_cluster }}/{{ namespace }}"
+ state: directory
+
+- name: Fetch pods list
+ command: kubectl get pods -n "{{ namespace }}" -o json
+ ignore_errors: true
+ register: pods_result
+
+- block:
+ - name: Save the pods list
+ copy:
+ dest: "{{ logs_management_cluster }}/{{ namespace }}/pods.yaml"
+ content: "{{ pods_result.stdout }}"
+
+ - name: Set pod names
+ set_fact:
+ pods: "{{ pods_result.stdout | from_json | json_query('items[*].metadata.name') }}"
+
+ - include_tasks: fetch_pod_logs.yaml
+ loop: "{{ pods }}"
+ loop_control:
+ loop_var: pod
+ when: pods_result is succeeded
+
+- name: Fetch secrets
+ shell: |
+ kubectl get secrets -n "{{ namespace }}" -o yaml \
+ > "{{ logs_management_cluster }}/{{ namespace }}/secrets.yaml"
+ ignore_errors: true
diff --git a/playbooks/metal3-ci/fetch_pod_logs.yaml b/playbooks/metal3-ci/fetch_pod_logs.yaml
new file mode 100644
index 000000000..077b2d319
--- /dev/null
+++ b/playbooks/metal3-ci/fetch_pod_logs.yaml
@@ -0,0 +1,24 @@
+---
+- name: Create the target directory
+ file:
+ path: "{{ logs_management_cluster }}/{{ namespace }}/{{ pod }}"
+ state: directory
+
+- name: Fetch pod information
+ command: kubectl get pod -n "{{ namespace }}" -o json "{{ pod }}"
+ register: pod_result
+
+- name: Process pod JSON
+ set_fact:
+ pod_json: "{{ pod_result.stdout | from_json }}"
+
+- name: Set container names
+ set_fact:
+ containers: "{{ pod_json.spec.containers | map(attribute='name') | list }}"
+ init_containers: "{{ pod_json.spec.initContainers | default([]) | map(attribute='name') | list }}"
+
+- name: Fetch container logs
+ shell: |
+ kubectl logs -n "{{ namespace }}" "{{ pod }}" "{{ item }}" \
+ > "{{ logs_management_cluster }}/{{ namespace }}/{{ pod }}/{{ item }}.log" 2>&1
+ loop: "{{ containers + init_containers }}"
diff --git a/playbooks/metal3-ci/post.yaml b/playbooks/metal3-ci/post.yaml
new file mode 100644
index 000000000..0e26579f2
--- /dev/null
+++ b/playbooks/metal3-ci/post.yaml
@@ -0,0 +1,194 @@
+---
+- hosts: all
+ tasks:
+ - name: Set the logs root
+ set_fact:
+ logs_root: "{{ ansible_user_dir }}/metal3-logs"
+
+ - name: Set log locations and containers
+ set_fact:
+ logs_before_pivoting: "{{ logs_root }}/before_pivoting"
+ logs_after_pivoting: "{{ logs_root }}/after_pivoting"
+ logs_management_cluster: "{{ logs_root }}/management_cluster"
+ containers:
+ - dnsmasq
+ - httpd-infra
+ - ironic
+ - ironic-endpoint-keepalived
+ - ironic-inspector
+ - ironic-log-watch
+ - registry
+ - sushy-tools
+ - vbmc
+ namespaces:
+ - baremetal-operator-system
+ - capi-system
+ - metal3
+
+ - name: Create log locations
+ file:
+ path: "{{ item }}"
+ state: directory
+ loop:
+ - "{{ logs_before_pivoting }}"
+ - "{{ logs_after_pivoting }}"
+ - "{{ logs_management_cluster }}"
+ - "{{ logs_root }}/libvirt"
+ - "{{ logs_root }}/system"
+
+ - name: Check if the logs before pivoting were stored
+ stat:
+ path: /tmp/docker
+ register: before_pivoting_result
+
+ - name: Copy logs before pivoting
+ copy:
+ src: /tmp/docker/
+ dest: "{{ logs_before_pivoting }}/"
+ remote_src: true
+ when: before_pivoting_result.stat.exists
+
+ - name: Set log location for containers (pivoting happened)
+ set_fact:
+ container_logs: "{{ logs_after_pivoting }}"
+ when: before_pivoting_result.stat.exists
+
+ - name: Set log location for containers (no pivoting)
+ set_fact:
+ container_logs: "{{ logs_before_pivoting }}"
+ when: not before_pivoting_result.stat.exists
+
+ - name: Fetch current container logs
+ shell: >
+ docker logs "{{ item }}" > "{{ container_logs }}/{{ item }}.log" 2>&1
+ become: true
+ ignore_errors: true
+ loop: "{{ containers }}"
+
+ - name: Fetch libvirt networks
+ shell: >
+ virsh net-dumpxml "{{ item }}" > "{{ logs_root }}/libvirt/net-{{ item }}.xml"
+ become: true
+ ignore_errors: true
+ loop:
+ - baremetal
+ - provisioning
+
+ - name: Fetch libvirt VMs
+ shell: |
+ for vm in $(virsh list --name --all); do
+ virsh dumpxml "$vm" > "{{ logs_root }}/libvirt/vm-$vm.xml"
+ done
+ become: true
+ ignore_errors: true
+
+ - name: Fetch system information
+ shell: "{{ item }} > {{ logs_root }}/system/{{ item | replace(' ', '-') }}.txt"
+ become: true
+ ignore_errors: true
+ loop:
+ - dmesg
+ - dpkg -l
+ - ip addr
+ - ip route
+ - iptables -L -v -n
+ - journalctl -b -o with-unit
+ - journalctl -u libvirtd
+ - pip freeze
+ - docker images
+ - docker ps --all
+ - systemctl
+
+ - name: Copy libvirt logs
+ copy:
+ src: /var/log/libvirt/qemu/
+ dest: "{{ logs_root }}/libvirt/"
+ remote_src: true
+ become: true
+
+ - name: Check if we have a cluster
+ command: kubectl cluster-info
+ ignore_errors: true
+ register: kubectl_result
+
+ - include_tasks: fetch_kube_logs.yaml
+ loop: "{{ namespaces }}"
+ loop_control:
+ loop_var: namespace
+ when: kubectl_result is succeeded
+
+ - name: Collect kubernetes resources
+ shell: |
+ kubectl get "{{ item }}" -A -o yaml > "{{ logs_management_cluster }}/{{ item }}.yaml"
+ loop:
+ - baremetalhosts
+ - clusters
+ - endpoints
+ - hostfirmwaresettings
+ - machines
+ - metal3ipaddresses
+ - metal3ippools
+ - metal3machines
+ - nodes
+ - pods
+ - preprovisioningimages
+ - services
+ ignore_errors: true
+ when: kubectl_result is succeeded
+
+ # FIXME(dtantsur): this is horrible, do something about it
+ - name: Fetch kubelet status logs from the master user metal3
+ shell: |
+ ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-metal3-status.log"
+ ignore_errors: true
+ register: kubelet0metal3status
+
+ - debug:
+ var: kubelet0metal3status.stdout_lines
+
+ - debug:
+ var: kubelet0metal3status.stderr_lines
+
+ - name: Fetch kubelet journal logs from the master user metal3
+ shell: |
+ ssh -vvv -o StrictHostKeyChecking=accept-new metal3@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-metal3-journal.log"
+ ignore_errors: true
+ register: kubelet0metal3journal
+
+ - debug:
+ var: kubelet0metal3journal.stdout_lines
+
+ - debug:
+ var: kubelet0metal3journal.stderr_lines
+
+ - name: Fetch kubelet status logs from the master user zuul
+ shell: |
+ ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo systemctl status kubelet" > "{{ logs_root }}/kubelet-0-zuul-status.log"
+ ignore_errors: true
+ register: kubelet0zuulstatus
+
+ - debug:
+ var: kubelet0zuulstatus.stdout_lines
+
+ - debug:
+ var: kubelet0zuulstatus.stderr_lines
+
+ - name: Fetch kubelet journal logs from the master user zuul
+ shell: |
+ ssh -vvv -o StrictHostKeyChecking=accept-new zuul@192.168.111.100 "sudo journalctl -xeu kubelet" > "{{ logs_root }}/kubelet-0-zuul-journal.log"
+ ignore_errors: true
+ register: kubelet0zuuljournal
+
+ - debug:
+ var: kubelet0zuuljournal.stdout_lines
+
+ - debug:
+ var: kubelet0zuuljournal.stderr_lines
+ # # #
+
+ - name: Copy logs to the zuul location
+ synchronize:
+ src: "{{ logs_root }}/"
+ dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}/"
+ mode: pull
+ become: true
diff --git a/playbooks/metal3-ci/run.yaml b/playbooks/metal3-ci/run.yaml
new file mode 100644
index 000000000..d49ddc6de
--- /dev/null
+++ b/playbooks/metal3-ci/run.yaml
@@ -0,0 +1,37 @@
+---
+- hosts: all
+ tasks:
+ - name: Define the metal3 variables
+ set_fact:
+ metal3_dev_env_src_dir: '{{ ansible_user_dir }}/metal3-dev-env'
+ metal3_environment:
+ ANSIBLE_VERBOSITY: 2
+ CONTROL_PLANE_MACHINE_COUNT: 1
+ IMAGE_OS: ubuntu
+ IMAGE_USERNAME: zuul
+ # NOTE(dtantsur): we don't have enough resources to provision even
+ # a 2-node cluster, so only provision a control plane node.
+ NUM_NODES: 2
+ LIBVIRT_DOMAIN_TYPE: "qemu"
+ WORKER_MACHINE_COUNT: 1
+
+ # TODO(dtantsur): add metal3-io/metal3-dev-env as a recognized project to
+ # https://opendev.org/openstack/project-config/src/commit/e15b9cae77bdc243322cee64b3688a2a43dd193c/zuul/main.yaml#L1416
+ - name: Clone metal3-dev-env
+ git:
+ dest: "{{ metal3_dev_env_src_dir }}"
+ repo: "https://github.com/metal3-io/metal3-dev-env"
+
+ - name: Build a metal3 environment
+ command: make
+ args:
+ chdir: "{{ metal3_dev_env_src_dir }}"
+ environment: "{{ metal3_environment }}"
+
+# NOTE(rpittau) skip the tests for the time being, as they imply the
+# presence of 2 nodes, 1 control plane plus 1 worker
+# - name: Run metal3 tests
+# command: make test
+# args:
+# chdir: "{{ metal3_dev_env_src_dir }}"
+# environment: "{{ metal3_environment }}"
diff --git a/releasenotes/notes/bug-2010613-3ab1f32aaa776f28.yaml b/releasenotes/notes/bug-2010613-3ab1f32aaa776f28.yaml
new file mode 100644
index 000000000..89769bfb8
--- /dev/null
+++ b/releasenotes/notes/bug-2010613-3ab1f32aaa776f28.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ [`bug 2010613 <https://storyboard.openstack.org/#!/story/2010613>`_]
+ Fixes an issue where the SNMP v3 auth protocol and priv protocol set in
+ the node ``driver_info`` were not retrieved correctly when an SNMP
+ client was initialized.
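Editor's note: for reference, the ``driver_info`` fields exercised by the new ``test__get_client`` unit test above take roughly this shape (all values are placeholders):

    driver_info = {
        'snmp_version': '3',
        'snmp_address': '192.0.2.1',
        'snmp_port': '161',
        'snmp_user': 'test-user',
        'snmp_auth_protocol': 'sha',
        'snmp_auth_key': 'test-auth-key',
        'snmp_priv_protocol': 'aes',
        'snmp_priv_key': 'test-priv-key',
        'snmp_context_engine_id': 'test-engine-id',
        'snmp_context_name': 'test-context-name',
    }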
diff --git a/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml b/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml
new file mode 100644
index 000000000..270278f1b
--- /dev/null
+++ b/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ When aborting cleaning, the ``last_error`` field is no longer initially
+ empty. It is now populated on the state transition to ``clean failed``.
+ - |
+ When cleaning or deployment fails, the ``last_error`` field is no longer
+ temporarily set to ``None`` while the power off action is running.
diff --git a/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml b/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml
new file mode 100644
index 000000000..dfa3b0f89
--- /dev/null
+++ b/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml
@@ -0,0 +1,39 @@
+---
+features:
+ - |
+ Adds the ability for Ironic to send conductor process metrics
+ for monitoring. This requires setting the new ``[metrics]backend``
+ option to ``collector`` and requires ``ironic-lib`` version ``5.4.0``
+ or newer. This data was previously only available through statsd.
+ The capability can be disabled by setting the
+ ``[sensor_data]enable_for_conductor`` option to False.
+ - |
+ Adds a ``[sensor_data]enable_for_nodes`` configuration option
+ to allow operators to disable sending node metric data via the
+ message bus notifier.
+ - |
+ Adds a new gauge metric ``ConductorManager.PowerSyncNodesCount``
+ which tracks the nodes considered for power state synchronization.
+ - Adds a new gauge metric ``ConductorManager.PowerSyncRecoveryNodeCount``
+ which represents the number of nodes which are being evaluated for power
+ state recovery checking.
+ - Adds a new gauge metric ``ConductorManager.SyncLocalStateNodeCount``
+ which represents the number of nodes being tracked locally by the
+ conductor.
+issues:
+ - Sensor data notifications to the message bus, such as when the
+ ``[metrics]backend`` configuration option is set to ``collector`` on a
+ dedicated API service process or instance, are not presently supported.
+ This functionality requires a periodic task to trigger the transmission
+ of metrics messages to the message bus notifier.
+deprecations:
+ - The setting values starting with ``send_sensor`` in the ``[conductor]``
+ configuration group have been deprecated and moved to a ``[sensor_data]``
+ configuration group. The names have been updated to shorter,
+ operator-friendly names.
+upgrade:
+ - Settings starting with ``send_sensor`` in the ``[conductor]``
+ configuration group have been moved to a ``[sensor_data]`` configuration
+ group and have been renamed to shorter names. If configuration
+ values are not updated, the ``oslo.config`` library will emit a warning
+ in the logs.
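Editor's note: a minimal sketch of the toggles named in this note, written in the ``CONF.set_override`` form the unit tests in this change use; in a real deployment the equivalent values go into ironic.conf:

    from ironic.conf import CONF

    # Send conductor process metrics via the message bus collector.
    CONF.set_override('backend', 'collector', group='metrics')
    # Keep conductor process metrics enabled...
    CONF.set_override('enable_for_conductor', True, group='sensor_data')
    # ...but stop sending per-node sensor data.
    CONF.set_override('enable_for_nodes', False, group='sensor_data')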
diff --git a/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml b/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml
new file mode 100644
index 000000000..fe02d33ff
--- /dev/null
+++ b/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ There are now configurable random wait times for fake drivers in a new
+ ironic.conf [fake] section; each supported driver has one configuration
+ option controlling the delay. These delays are applied to operations which
+ typically block in other drivers. This allows more realistic scenarios to
+ be arranged for performance and functional testing of ironic itself.
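Editor's note: a short sketch of configuring such a delay, assuming a hypothetical ``power_delay`` option in the new ``[fake]`` group; the option name is illustrative, and the accepted value format matches the ``fake.sleep`` tests earlier in this change:

    from ironic.conf import CONF

    # 'power_delay' is a hypothetical option name. '1,10' requests a
    # random delay between 1 and 10 seconds; '0' disables the delay.
    CONF.set_override('power_delay', '1,10', group='fake')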
diff --git a/releasenotes/notes/fix-online-version-migration-db432a7b239647fa.yaml b/releasenotes/notes/fix-online-version-migration-db432a7b239647fa.yaml
new file mode 100644
index 000000000..824185aab
--- /dev/null
+++ b/releasenotes/notes/fix-online-version-migration-db432a7b239647fa.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ Fixes an issue in the online upgrade logic where database models for
+ Node Traits and BIOS Settings resulted in an error when performing
+ the online data migration. This was because these tables were originally
+ created as extensions of the Nodes database table, and the schema
+ of the database differed just enough to result in an error
+ if there was data to migrate in these tables upon upgrade,
+ which would have occurred if an early BIOS Setting adopter had
+ data in the database prior to upgrading to the Yoga release of Ironic.
+
+ The online upgrade logic now substitutes an alternate primary key
+ name when applicable.
diff --git a/releasenotes/notes/fix-power-off-token-wipe-e7d605997f00d39d.yaml b/releasenotes/notes/fix-power-off-token-wipe-e7d605997f00d39d.yaml
new file mode 100644
index 000000000..14a489b46
--- /dev/null
+++ b/releasenotes/notes/fix-power-off-token-wipe-e7d605997f00d39d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes an issue where an agent token could be inadvertently orphaned
+ if a node is already in the target power state when we attempt to turn
+ the node off.
diff --git a/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml b/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml
new file mode 100644
index 000000000..c3096be79
--- /dev/null
+++ b/releasenotes/notes/fix_boot_mode_switch_with_anaconda_deploy_with_ilo_drivers-16637adb62f0ed2f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Enables boot mode switching during anaconda deploy for ``ilo``
+ and ``ilo5`` hardware types.
diff --git a/releasenotes/notes/fix_secure_boot_with_anaconda_deploy-84d7c1e3bbfa40f2.yaml b/releasenotes/notes/fix_secure_boot_with_anaconda_deploy-84d7c1e3bbfa40f2.yaml
new file mode 100644
index 000000000..a03289c42
--- /dev/null
+++ b/releasenotes/notes/fix_secure_boot_with_anaconda_deploy-84d7c1e3bbfa40f2.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Fixes secure boot with anaconda deploy.
diff --git a/releasenotes/notes/ironic-antelope-prelude-0b77964469f56b13.yaml b/releasenotes/notes/ironic-antelope-prelude-0b77964469f56b13.yaml
new file mode 100644
index 000000000..98bf9c014
--- /dev/null
+++ b/releasenotes/notes/ironic-antelope-prelude-0b77964469f56b13.yaml
@@ -0,0 +1,14 @@
+---
+prelude: >
+ The Ironic team hereby announces the release of OpenStack 2023.1
+ (Ironic 21.4.0). This represents the completion of a six month development
+ cycle, which primarily focused on internal and scaling improvements.
+ Those improvements included revamping the database layer to improve
+ performance and ensure compatibility with new versions of SQLAlchemy,
+ enhancing the ironic-conductor service to export application metrics to
+ Prometheus via the ironic-prometheus-exporter, and the addition of a
+ new API concept of node sharding to help with scaling of services that
+ make frequent API calls to Ironic.
+
+ The new Ironic release also comes with a slew of bugfixes for Ironic
+ services and hardware drivers. We sincerely hope you enjoy it!
diff --git a/releasenotes/notes/no-recalculate-653e524fd6160e72.yaml b/releasenotes/notes/no-recalculate-653e524fd6160e72.yaml
new file mode 100644
index 000000000..3d2e6dad4
--- /dev/null
+++ b/releasenotes/notes/no-recalculate-653e524fd6160e72.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ No longer re-calculates checksums for images that are already raw.
+ Previously, this caused significant delays when deploying raw images.
diff --git a/releasenotes/notes/wipe-agent-token-upon-cleaning-timeout-c9add514fad1b02c.yaml b/releasenotes/notes/wipe-agent-token-upon-cleaning-timeout-c9add514fad1b02c.yaml
new file mode 100644
index 000000000..0aa828ccd
--- /dev/null
+++ b/releasenotes/notes/wipe-agent-token-upon-cleaning-timeout-c9add514fad1b02c.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue where an agent token was being orphaned if a baremetal node
+ timed out during cleaning operations, leading to issues where the node
+ would, in some cases, not be able to establish a new token with Ironic
+ for future operations. We now always wipe the token in this case.
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 000000000..d1238479b
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 107450a67..78f7dc0f8 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 6878c43c6..c3581ff0d 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Ironic Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-01 23:20+0000\n"
+"POT-Creation-Date: 2023-03-08 03:30+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -302,9 +302,6 @@ msgstr "20.1.0"
msgid "20.1.1"
msgstr "20.1.1"
-msgid "20.1.1-6"
-msgstr "20.1.1-6"
-
msgid "20.2.0"
msgstr "20.2.0"
@@ -314,18 +311,12 @@ msgstr "21.0.0"
msgid "21.1.0"
msgstr "21.1.0"
-msgid "21.1.0-6"
-msgstr "21.1.0-6"
-
msgid "21.2.0"
msgstr "21.2.0"
msgid "21.3.0"
msgstr "21.3.0"
-msgid "21.3.0-4"
-msgstr "21.3.0-4"
-
msgid "4.0.0 First semver release"
msgstr "4.0.0 First semver release"
diff --git a/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po
new file mode 100644
index 000000000..81262b544
--- /dev/null
+++ b/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po
@@ -0,0 +1,159 @@
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Akihiro Motoki <amotoki@gmail.com>, 2016. #zanata
+# Akihito INOH <aki-inou@rs.jp.nec.com>, 2018. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: Ironic Release Notes\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2023-03-08 03:30+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2018-02-15 11:45+0000\n"
+"Last-Translator: Akihito INOH <aki-inou@rs.jp.nec.com>\n"
+"Language-Team: Japanese\n"
+"Language: ja\n"
+"X-Generator: Zanata 4.3.3\n"
+"Plural-Forms: nplurals=1; plural=0\n"
+
+msgid ""
+"\"Port group\" support allows users to take advantage of bonded network "
+"interfaces."
+msgstr ""
+"\"Port group\" のサポートにより、ユーザーはボンディングされたネットワークイン"
+"ターフェースが利用できるようになります。"
+
+msgid "10.0.0"
+msgstr "10.0.0"
+
+msgid "10.1.0"
+msgstr "10.1.0"
+
+msgid "4.2.2"
+msgstr "4.2.2"
+
+msgid "4.2.3"
+msgstr "4.2.3"
+
+msgid "4.2.4"
+msgstr "4.2.4"
+
+msgid "4.2.5"
+msgstr "4.2.5"
+
+msgid "4.3.0"
+msgstr "4.3.0"
+
+msgid "443, 80"
+msgstr "443, 80"
+
+msgid "5.0.0"
+msgstr "5.0.0"
+
+msgid "5.1.0"
+msgstr "5.1.0"
+
+msgid "5.1.1"
+msgstr "5.1.1"
+
+msgid "5.1.2"
+msgstr "5.1.2"
+
+msgid "5.1.3"
+msgstr "5.1.3"
+
+msgid "6.0.0"
+msgstr "6.0.0"
+
+msgid "6.1.0"
+msgstr "6.1.0"
+
+msgid "6.2.0"
+msgstr "6.2.0"
+
+msgid "6.2.2"
+msgstr "6.2.2"
+
+msgid "6.2.3"
+msgstr "6.2.3"
+
+msgid "6.2.4"
+msgstr "6.2.4"
+
+msgid "6.3.0"
+msgstr "6.3.0"
+
+msgid "7.0.0"
+msgstr "7.0.0"
+
+msgid "7.0.1"
+msgstr "7.0.1"
+
+msgid "7.0.2"
+msgstr "7.0.2"
+
+msgid "7.0.3"
+msgstr "7.0.3"
+
+msgid "7.0.4"
+msgstr "7.0.4"
+
+msgid "8.0.0"
+msgstr "8.0.0"
+
+msgid "9.0.0"
+msgstr "9.0.0"
+
+msgid "9.0.1"
+msgstr "9.0.1"
+
+msgid "9.1.0"
+msgstr "9.1.0"
+
+msgid "9.1.1"
+msgstr "9.1.1"
+
+msgid "9.1.2"
+msgstr "9.1.2"
+
+msgid "9.1.3"
+msgstr "9.1.3"
+
+msgid "9.2.0"
+msgstr "9.2.0"
+
+msgid ""
+"A few major changes are worth mentioning. This is not an exhaustive list:"
+msgstr ""
+"いくつかの主要な変更がありました。全てではありませんが以下にリストを示しま"
+"す。"
+
+msgid "A few major changes since 9.1.x (Pike) are worth mentioning:"
+msgstr "9.1.x (Pike) からの主要な変更がいくつかありました。"
+
+msgid "Bug Fixes"
+msgstr "バグ修正"
+
+msgid "Current Series Release Notes"
+msgstr "開発中バージョンのリリースノート"
+
+msgid "Deprecation Notes"
+msgstr "廃止予定の機能"
+
+msgid "Known Issues"
+msgstr "既知の問題"
+
+msgid "New Features"
+msgstr "新機能"
+
+msgid "Option"
+msgstr "オプション"
+
+msgid "Other Notes"
+msgstr "その他の注意点"
+
+msgid "Security Issues"
+msgstr "セキュリティー上の問題"
+
+msgid "Upgrade Notes"
+msgstr "アップグレード時の注意"
diff --git a/requirements.txt b/requirements.txt
index 0c73e632e..2f4813baa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,7 +14,7 @@ WebOb>=1.7.1 # MIT
python-cinderclient!=4.0.0,>=3.3.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
keystoneauth1>=4.2.0 # Apache-2.0
-ironic-lib>=4.6.1 # Apache-2.0
+ironic-lib>=5.4.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
pytz>=2013.6 # MIT
stevedore>=1.29.0 # Apache-2.0
diff --git a/tools/benchmark/generate-statistics.py b/tools/benchmark/generate-statistics.py
index e8327f3ac..e9fe0f56d 100644
--- a/tools/benchmark/generate-statistics.py
+++ b/tools/benchmark/generate-statistics.py
@@ -235,6 +235,7 @@ def _assess_db_object_and_api_performance_ports(mock_log, mock_request):
node_ident=None,
address=None,
portgroup_ident=None,
+ shard=None,
marker=None,
limit=None,
sort_key="id",
@@ -249,6 +250,7 @@ def _assess_db_object_and_api_performance_ports(mock_log, mock_request):
node_ident=None,
address=None,
portgroup_ident=None,
+ shard=None,
marker=res['ports'][-1]['uuid'],
limit=None,
sort_key="id",
diff --git a/tox.ini b/tox.ini
index 97ea9f707..1792d81c9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -52,7 +52,7 @@ commands =
# the check and gate queues.
{toxinidir}/tools/run_bashate.sh {toxinidir}
# Check the *.rst files
- doc8 README.rst CONTRIBUTING.rst doc/source --ignore D001
+ doc8 README.rst CONTRIBUTING.rst doc/source api-ref/source --ignore D001
# Check to make sure reno releasenotes created with 'reno new'
{toxinidir}/tools/check-releasenotes.py
diff --git a/zuul.d/metal3-jobs.yaml b/zuul.d/metal3-jobs.yaml
new file mode 100644
index 000000000..9322c2103
--- /dev/null
+++ b/zuul.d/metal3-jobs.yaml
@@ -0,0 +1,30 @@
+- job:
+ name: metal3-base
+ abstract: true
+ description: Base job for metal3-dev-env based ironic jobs.
+ nodeset: openstack-single-node-jammy
+ run: playbooks/metal3-ci/run.yaml
+ post-run: playbooks/metal3-ci/post.yaml
+ timeout: 10800
+ required-projects:
+ - opendev.org/openstack/ironic
+ - opendev.org/openstack/ironic-inspector
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^driver-requirements.txt$
+ - ^install-guide/.*$
+ - ^ironic/locale/.*$
+ - ^ironic/tests/.*$
+ - ^ironic_inspector/locale/.*$
+ - ^ironic_inspector/test/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^test-requirements.txt$
+ - ^tox.ini$
+
+- job:
+ name: metal3-integration
+ description: Run metal3 CI on ironic.
+ parent: metal3-base
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 0f7ff75e1..8fbfbb929 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -57,6 +57,8 @@
voting: false
- bifrost-benchmark-ironic:
voting: false
+ - metal3-integration:
+ voting: false
gate:
jobs:
- ironic-tox-unit-with-driver-libs