summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--api-ref/source/baremetal-api-v1-nodes-inventory.inc40
-rw-r--r--api-ref/source/baremetal-api-v1-nodes.inc26
-rw-r--r--api-ref/source/baremetal-api-v1-ports.inc5
-rw-r--r--api-ref/source/baremetal-api-v1-shards.inc56
-rw-r--r--api-ref/source/index.rst1
-rw-r--r--api-ref/source/parameters.yaml57
-rw-r--r--api-ref/source/samples/node-inventory-response.json31
-rw-r--r--api-ref/source/samples/shards-list-response.json12
-rw-r--r--devstack/lib/ironic73
-rw-r--r--devstack/plugin.sh2
-rwxr-xr-xdevstack/tools/ironic/scripts/cirros-partition.sh2
-rwxr-xr-xdevstack/upgrade/upgrade.sh2
-rw-r--r--doc/source/admin/anaconda-deploy-interface.rst37
-rw-r--r--doc/source/admin/drivers.rst1
-rw-r--r--doc/source/admin/drivers/fake.rst36
-rw-r--r--doc/source/admin/drivers/ibmc.rst2
-rw-r--r--doc/source/admin/drivers/ilo.rst83
-rw-r--r--doc/source/admin/drivers/irmc.rst70
-rw-r--r--doc/source/admin/drivers/redfish.rst83
-rw-r--r--doc/source/admin/drivers/snmp.rst74
-rw-r--r--doc/source/admin/hardware-burn-in.rst7
-rw-r--r--doc/source/admin/metrics.rst34
-rw-r--r--doc/source/admin/retirement.rst21
-rw-r--r--doc/source/admin/secure-rbac.rst40
-rw-r--r--doc/source/admin/troubleshooting.rst171
-rw-r--r--doc/source/contributor/dev-quickstart.rst7
-rw-r--r--doc/source/contributor/ironic-boot-from-volume.rst3
-rw-r--r--doc/source/contributor/releasing.rst52
-rw-r--r--doc/source/contributor/webapi-version-history.rst23
-rw-r--r--doc/source/install/include/common-prerequisites.inc10
-rw-r--r--driver-requirements.txt4
-rw-r--r--ironic/api/controllers/v1/__init__.py14
-rw-r--r--ironic/api/controllers/v1/node.py126
-rw-r--r--ironic/api/controllers/v1/port.py71
-rw-r--r--ironic/api/controllers/v1/portgroup.py4
-rw-r--r--ironic/api/controllers/v1/shard.py59
-rw-r--r--ironic/api/controllers/v1/utils.py37
-rw-r--r--ironic/api/controllers/v1/versions.py9
-rw-r--r--ironic/cmd/status.py41
-rwxr-xr-xironic/common/args.py17
-rw-r--r--ironic/common/exception.py23
-rw-r--r--ironic/common/glance_service/image_service.py3
-rw-r--r--ironic/common/images.py26
-rw-r--r--ironic/common/policy.py88
-rw-r--r--ironic/common/pxe_utils.py89
-rw-r--r--ironic/common/release_mappings.py29
-rw-r--r--ironic/common/rpc.py7
-rw-r--r--ironic/common/rpc_service.py27
-rw-r--r--ironic/common/states.py3
-rw-r--r--ironic/common/swift.py42
-rw-r--r--ironic/common/utils.py15
-rw-r--r--ironic/conductor/base_manager.py21
-rw-r--r--ironic/conductor/cleaning.py33
-rw-r--r--ironic/conductor/manager.py167
-rw-r--r--ironic/conductor/periodics.py17
-rw-r--r--ironic/conductor/steps.py6
-rw-r--r--ironic/conductor/task_manager.py11
-rw-r--r--ironic/conductor/utils.py8
-rw-r--r--ironic/conf/__init__.py8
-rw-r--r--ironic/conf/anaconda.py11
-rw-r--r--ironic/conf/api.py5
-rw-r--r--ironic/conf/conductor.py61
-rw-r--r--ironic/conf/default.py2
-rw-r--r--ironic/conf/deploy.py6
-rw-r--r--ironic/conf/dhcp.py3
-rw-r--r--ironic/conf/dnsmasq.py43
-rw-r--r--ironic/conf/fake.py85
-rw-r--r--ironic/conf/glance.py1
-rw-r--r--ironic/conf/ilo.py5
-rw-r--r--ironic/conf/inventory.py34
-rw-r--r--ironic/conf/irmc.py15
-rw-r--r--ironic/conf/opts.py5
-rw-r--r--ironic/conf/sensor_data.py89
-rw-r--r--ironic/db/api.py48
-rw-r--r--ironic/db/sqlalchemy/__init__.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/0ac0f39bc5aa_add_node_inventory_table.py46
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py16
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py8
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/4dbec778866e_create_node_shard.py31
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py4
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py5
-rw-r--r--ironic/db/sqlalchemy/api.py1074
-rw-r--r--ironic/db/sqlalchemy/models.py74
-rw-r--r--ironic/dhcp/base.py11
-rw-r--r--ironic/dhcp/dnsmasq.py159
-rw-r--r--ironic/dhcp/neutron.py11
-rw-r--r--ironic/drivers/ilo.py5
-rw-r--r--ironic/drivers/irmc.py10
-rw-r--r--ironic/drivers/modules/agent_base.py6
-rw-r--r--ironic/drivers/modules/boot_mode_utils.py2
-rw-r--r--ironic/drivers/modules/console_utils.py2
-rw-r--r--ironic/drivers/modules/drac/raid.py82
-rw-r--r--ironic/drivers/modules/fake.py63
-rw-r--r--ironic/drivers/modules/ilo/common.py42
-rw-r--r--ironic/drivers/modules/ilo/management.py79
-rw-r--r--ironic/drivers/modules/ilo/vendor.py43
-rw-r--r--ironic/drivers/modules/image_utils.py10
-rw-r--r--ironic/drivers/modules/inspect_utils.py166
-rw-r--r--ironic/drivers/modules/inspector.py12
-rw-r--r--ironic/drivers/modules/ipmitool.py6
-rw-r--r--ironic/drivers/modules/irmc/common.py226
-rw-r--r--ironic/drivers/modules/irmc/inspect.py98
-rw-r--r--ironic/drivers/modules/irmc/management.py289
-rw-r--r--ironic/drivers/modules/irmc/power.py64
-rw-r--r--ironic/drivers/modules/irmc/vendor.py75
-rw-r--r--ironic/drivers/modules/ks.cfg.template6
-rw-r--r--ironic/drivers/modules/pxe_grub_config.template5
-rw-r--r--ironic/drivers/modules/redfish/raid.py8
-rw-r--r--ironic/drivers/modules/redfish/utils.py61
-rw-r--r--ironic/drivers/modules/snmp.py339
-rw-r--r--ironic/objects/__init__.py1
-rw-r--r--ironic/objects/node.py8
-rw-r--r--ironic/objects/node_inventory.py89
-rw-r--r--ironic/objects/port.py29
-rw-r--r--ironic/objects/portgroup.py8
-rw-r--r--ironic/tests/base.py81
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_node.py251
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_port.py88
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_root.py4
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_shard.py80
-rw-r--r--ironic/tests/unit/api/test_acl.py21
-rw-r--r--ironic/tests/unit/api/test_rbac_project_scoped.yaml548
-rw-r--r--ironic/tests/unit/api/test_rbac_system_scoped.yaml194
-rw-r--r--ironic/tests/unit/cmd/test_status.py82
-rw-r--r--ironic/tests/unit/common/test_glance_service.py57
-rw-r--r--ironic/tests/unit/common/test_images.py3
-rw-r--r--ironic/tests/unit/common/test_pxe_utils.py144
-rw-r--r--ironic/tests/unit/common/test_release_mappings.py10
-rw-r--r--ironic/tests/unit/common/test_rpc_service.py81
-rw-r--r--ironic/tests/unit/conductor/mgr_utils.py16
-rw-r--r--ironic/tests/unit/conductor/test_allocations.py2
-rw-r--r--ironic/tests/unit/conductor/test_base_manager.py6
-rw-r--r--ironic/tests/unit/conductor/test_cleaning.py47
-rw-r--r--ironic/tests/unit/conductor/test_manager.py261
-rw-r--r--ironic/tests/unit/conductor/test_utils.py24
-rw-r--r--ironic/tests/unit/db/sqlalchemy/test_migrations.py348
-rw-r--r--ironic/tests/unit/db/test_conductor.py13
-rw-r--r--ironic/tests/unit/db/test_node_inventory.py36
-rw-r--r--ironic/tests/unit/db/test_nodes.py99
-rw-r--r--ironic/tests/unit/db/test_ports.py39
-rw-r--r--ironic/tests/unit/db/test_shard.py46
-rw-r--r--ironic/tests/unit/db/utils.py32
-rw-r--r--ironic/tests/unit/dhcp/test_dnsmasq.py140
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_raid.py196
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_common.py52
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_management.py115
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_vendor.py71
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_common.py129
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_inspect.py132
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_management.py357
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_power.py88
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_common.py4
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_raid.py4
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_utils.py16
-rw-r--r--ironic/tests/unit/drivers/modules/test_image_utils.py47
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspect_utils.py229
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspector.py43
-rw-r--r--ironic/tests/unit/drivers/modules/test_ipmitool.py10
-rw-r--r--ironic/tests/unit/drivers/modules/test_snmp.py84
-rw-r--r--ironic/tests/unit/drivers/pxe_grub_config.template4
-rw-r--r--ironic/tests/unit/drivers/test_fake_hardware.py29
-rw-r--r--ironic/tests/unit/drivers/third_party_driver_mock_specs.py2
-rw-r--r--ironic/tests/unit/objects/test_node_inventory.py49
-rw-r--r--ironic/tests/unit/objects/test_objects.py7
-rw-r--r--ironic/tests/unit/objects/test_port.py12
-rw-r--r--ironic/tests/unit/objects/test_portgroup.py19
-rw-r--r--redfish-interop-profiles/OpenStackIronicProfile.v1_0_0.json221
-rw-r--r--releasenotes/config.yaml5
-rw-r--r--releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml8
-rw-r--r--releasenotes/notes/add-node-inventory-7cde961b14caa11e.yaml5
-rw-r--r--releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml13
-rw-r--r--releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml5
-rw-r--r--releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml23
-rw-r--r--releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml8
-rw-r--r--releasenotes/notes/catch-all-cleaning-exceptions-1317a534a1c9db56.yaml8
-rw-r--r--releasenotes/notes/change-c9c01700dcfd599b.yaml9
-rw-r--r--releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml8
-rw-r--r--releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml23
-rw-r--r--releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml39
-rw-r--r--releasenotes/notes/console-pid-file-6108d2775ef947fe.yaml6
-rw-r--r--releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml16
-rw-r--r--releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml7
-rw-r--r--releasenotes/notes/dnsmasq_dhcp-9154fcae927dc3de.yaml7
-rw-r--r--releasenotes/notes/fakedelay-7eac23ad8881a736.yaml8
-rw-r--r--releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml8
-rw-r--r--releasenotes/notes/fix-console-port-conflict-6dc19688079e2c7f.yaml8
-rw-r--r--releasenotes/notes/fix-context-image-hardlink-16f452974abc7327.yaml7
-rw-r--r--releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml14
-rw-r--r--releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml5
-rw-r--r--releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml26
-rw-r--r--releasenotes/notes/fix-inspectwait-finished-at-4b817af4bf4c30c2.yaml5
-rw-r--r--releasenotes/notes/fix-irmc-s6-2.00-http-incompatibility-61a31d12aa33fbd8.yaml19
-rw-r--r--releasenotes/notes/fix-irmc-s6-2.00-ipmi-incompatibility-118484a424df02b1.yaml15
-rw-r--r--releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml7
-rw-r--r--releasenotes/notes/fix_anaconda-70f4268edc255ff4.yaml5
-rw-r--r--releasenotes/notes/fix_anaconda_pxe-6c75d42872424fec.yaml6
-rw-r--r--releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml7
-rw-r--r--releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml5
-rw-r--r--releasenotes/notes/irmc-align-with-ironic-default-boot-mode-dde6f65ea084c9e6.yaml5
-rw-r--r--releasenotes/notes/irmc-change-boot-interface-order-e76f5018da116a90.yaml26
-rw-r--r--releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml5
-rw-r--r--releasenotes/notes/lockutils-default-logging-8c38b8c0ac71043f.yaml8
-rw-r--r--releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml10
-rw-r--r--releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml27
-rw-r--r--releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml7
-rw-r--r--releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml7
-rw-r--r--releasenotes/notes/shard-support-a26f8d2ab5cca582.yaml14
-rw-r--r--releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml10
-rw-r--r--releasenotes/notes/wait_hash_ring_reset-ef8bd548659e9906.yaml13
-rw-r--r--releasenotes/notes/zed-prelude-09fe95b11ad2459d.yaml12
-rw-r--r--releasenotes/source/index.rst1
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po536
-rw-r--r--releasenotes/source/zed.rst6
-rw-r--r--reno.yaml4
-rw-r--r--requirements.txt8
-rw-r--r--setup.cfg2
-rw-r--r--setup.py4
-rw-r--r--test-requirements.txt2
-rw-r--r--tools/benchmark/do_not_run_create_benchmark_data.py63
-rw-r--r--tools/benchmark/generate-statistics.py112
-rw-r--r--tox.ini15
-rw-r--r--zuul.d/ironic-jobs.yaml66
-rw-r--r--zuul.d/project.yaml6
227 files changed, 10134 insertions, 1615 deletions
diff --git a/api-ref/source/baremetal-api-v1-nodes-inventory.inc b/api-ref/source/baremetal-api-v1-nodes-inventory.inc
new file mode 100644
index 000000000..4c36e5aa2
--- /dev/null
+++ b/api-ref/source/baremetal-api-v1-nodes-inventory.inc
@@ -0,0 +1,40 @@
+.. -*- rst -*-
+
+==============
+Node inventory
+==============
+
+.. versionadded:: 1.81
+
+Given a Node identifier, the API provides access to the introspection data
+associated with the Node via the ``v1/nodes/{node_ident}/inventory`` endpoint.
+
+Fetch node inventory
+===============================
+
+.. rest_method:: GET /v1/nodes/{node_ident}/inventory
+
+Normal response code: 200
+
+Error codes:
+ - 404 (NodeNotFound, InventoryNotRecorded)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - node_ident: node_ident
+
+Response
+--------
+
+.. rest_parameters:: parameters.yaml
+
+ - inventory: n_inventory
+ - plugin_data: n_plugin_data
+
+**Example of inventory from a node:**
+
+.. literalinclude:: samples/node-inventory-response.json
+ :language: javascript
diff --git a/api-ref/source/baremetal-api-v1-nodes.inc b/api-ref/source/baremetal-api-v1-nodes.inc
index 2ebbd2c5d..47bcceb58 100644
--- a/api-ref/source/baremetal-api-v1-nodes.inc
+++ b/api-ref/source/baremetal-api-v1-nodes.inc
@@ -104,6 +104,9 @@ supplied when the Node is created, or the resource may be updated later.
.. versionadded:: 1.65
Introduced the ``lessee`` field.
+.. versionadded:: 1.82
+ Introduced the ``shard`` field.
+
Normal response codes: 201
Error codes: 400,403,406
@@ -135,6 +138,7 @@ Request
- owner: owner
- description: req_n_description
- lessee: lessee
+ - shard: shard
- automated_clean: req_automated_clean
- bios_interface: req_bios_interface
- chassis_uuid: req_chassis_uuid
@@ -161,7 +165,7 @@ and any defaults added for non-specified fields. Most fields default to "null"
or "".
The list and example below are representative of the response as of API
-microversion 1.48.
+microversion 1.81.
.. rest_parameters:: parameters.yaml
@@ -213,6 +217,7 @@ microversion 1.48.
- conductor: conductor
- owner: owner
- lessee: lessee
+ - shard: shard
- description: n_description
- allocation_uuid: allocation_uuid
- automated_clean: automated_clean
@@ -280,6 +285,9 @@ provision state, and maintenance setting for each Node.
.. versionadded:: 1.65
Introduced the ``lessee`` field.
+.. versionadded:: 1.82
+ Introduced the ``shard`` field. Introduced the ``sharded`` request parameter.
+
Normal response codes: 200
Error codes: 400,403,406
@@ -300,6 +308,8 @@ Request
- fault: r_fault
- owner: owner
- lessee: lessee
+ - shard: req_shard
+ - sharded: req_sharded
- description_contains: r_description_contains
- fields: fields
- limit: limit
@@ -371,6 +381,9 @@ Nova instance, eg. with a request to ``v1/nodes/detail?instance_uuid={NOVA INSTA
.. versionadded:: 1.65
Introduced the ``lessee`` field.
+.. versionadded:: 1.82
+ Introduced the ``shard`` field. Introduced the ``sharded`` request parameter.
+
Normal response codes: 200
Error codes: 400,403,406
@@ -391,6 +404,8 @@ Request
- conductor: r_conductor
- owner: owner
- lessee: lessee
+ - shard: req_shard
+ - sharded: req_sharded
- description_contains: r_description_contains
- limit: limit
- marker: marker
@@ -450,6 +465,7 @@ Response
- protected_reason: protected_reason
- owner: owner
- lessee: lessee
+ - shard: shard
- description: n_description
- conductor: conductor
- allocation_uuid: allocation_uuid
@@ -508,6 +524,9 @@ only the specified set.
.. versionadded:: 1.66
Introduced the ``network_data`` field.
+.. versionadded:: 1.82
+ Introduced the ``shard`` field.
+
Normal response codes: 200
Error codes: 400,403,404,406
@@ -573,6 +592,7 @@ Response
- protected_reason: protected_reason
- owner: owner
- lessee: lessee
+ - shard: shard
- description: n_description
- conductor: conductor
- allocation_uuid: allocation_uuid
@@ -600,6 +620,9 @@ managed through sub-resources.
.. versionadded:: 1.51
Introduced the ability to set/unset a node's description.
+.. versionadded:: 1.82
+ Introduced the ability to set/unset a node's shard.
+
Normal response codes: 200
Error codes: 400,403,404,406,409
@@ -670,6 +693,7 @@ Response
- protected_reason: protected_reason
- owner: owner
- lessee: lessee
+ - shard: shard
- description: n_description
- conductor: conductor
- allocation_uuid: allocation_uuid
diff --git a/api-ref/source/baremetal-api-v1-ports.inc b/api-ref/source/baremetal-api-v1-ports.inc
index f40d13391..3fa7e9d47 100644
--- a/api-ref/source/baremetal-api-v1-ports.inc
+++ b/api-ref/source/baremetal-api-v1-ports.inc
@@ -49,6 +49,10 @@ By default, this query will return the uuid and address for each Port.
.. versionadded:: 1.53
Added the ``is_smartnic`` field.
+.. versionadded:: 1.82
+ Added the ability to filter ports based on the shard of the node they are
+ associated with.
+
Normal response code: 200
Request
@@ -60,6 +64,7 @@ Request
- node_uuid: r_port_node_uuid
- portgroup: r_port_portgroup_ident
- address: r_port_address
+ - shard: r_port_shard
- fields: fields
- limit: limit
- marker: marker
diff --git a/api-ref/source/baremetal-api-v1-shards.inc b/api-ref/source/baremetal-api-v1-shards.inc
new file mode 100644
index 000000000..c051e506e
--- /dev/null
+++ b/api-ref/source/baremetal-api-v1-shards.inc
@@ -0,0 +1,56 @@
+.. -*- rst -*-
+
+======
+Shards
+======
+
+This section describes an API endpoint returning the population of shards
+among nodes in the Bare Metal Service. Shards are a way to group nodes in the
+Bare Metal service. They are used by API clients to separate nodes into groups,
+allowing horizontal scaling.
+
+Shards are not directly added and removed from the Bare Metal service. Instead,
+operators can configure a node into a given shard by setting the ``shard`` key
+to any unique string value representing the shard.
+
+.. note::
+ The Bare Metal Service does not use shards directly. It instead relies on
+ API clients and external services to use shards to group nodes into smaller
+ areas of responsibility.
+
+
+Shards
+======
+
+.. rest_method:: GET /v1/shards
+
+.. versionadded:: 1.82
+
+The ``/v1/shards`` endpoint exists to allow querying the distribution of nodes
+between all defined shards.
+
+Normal response codes: 200
+
+Error response codes: 400 403 404
+
+Request
+-------
+
+No request parameters are accepted by this endpoint.
+
+Response
+--------
+
+Returns a list of shards and the count of nodes assigned to each. The
+list is sorted by descending count.
+
+.. rest_parameters:: parameters.yaml
+
+ - name: shard_name
+ - count: shard_count
+
+Response Example
+----------------
+
+.. literalinclude:: samples/shards-list-response.json
+ :language: javascript
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
index 50c6a6d14..bb41ba6fd 100644
--- a/api-ref/source/index.rst
+++ b/api-ref/source/index.rst
@@ -28,6 +28,7 @@
.. include:: baremetal-api-v1-node-allocation.inc
.. include:: baremetal-api-v1-deploy-templates.inc
.. include:: baremetal-api-v1-nodes-history.inc
+.. include:: baremetal-api-v1-shards.inc
.. NOTE(dtantsur): keep chassis close to the end since it's semi-deprecated
.. include:: baremetal-api-v1-chassis.inc
.. NOTE(dtantsur): keep misc last, since it covers internal API
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index d0da64ec2..6a50c9452 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -343,13 +343,17 @@ r_port_node_ident:
description: |
Filter the list of returned Ports, and only return the ones associated
with this specific node (name or UUID), or an empty set if not found.
+ This filter takes precedence over all other filters, and cannot be set at
+ the same time as node_uuid or portgroup.
in: query
required: false
type: string
r_port_node_uuid:
description: |
Filter the list of returned Ports, and only return the ones associated
- with this specific node UUID, or an empty set if not found.
+ with this specific node UUID, or an empty set if not found. This filter
+ takes precedence over all other filters, and cannot be set at the same
+ time as node or portgroup.
in: query
required: false
type: string
@@ -357,9 +361,18 @@ r_port_portgroup_ident:
description: |
Filter the list of returned Ports, and only return the ones associated
with this specific Portgroup (name or UUID), or an empty set if not found.
+ This filter takes precedence over all other filters, and cannot be set at
+ the same time as node_uuid or node.
in: query
required: false
type: string
+r_port_shard:
+  description: |
+    Filter the list of returned Ports, and only return the ones associated
+    with nodes in the specified shard(s), or an empty set if none match.
+  in: query
+  required: false
+  type: array
r_portgroup_address:
description: |
Filter the list of returned Portgroups, and only return the ones with the
@@ -1191,6 +1204,18 @@ n_indicators:
in: body
required: true
type: array
+n_inventory:
+ description: |
+ Inventory of this node.
+ in: body
+ required: false
+ type: JSON
+n_plugin_data:
+ description: |
+ Plugin data of this node.
+ in: body
+ required: false
+ type: JSON
n_portgroups:
description: |
Links to the collection of portgroups on this node.
@@ -1795,6 +1820,20 @@ req_resource_class_create:
in: body
required: false
type: string
+req_shard:
+  description: |
+    Filter the list of returned Nodes, and only return the ones assigned
+    to the specified shard(s), or an empty set if none match.
+  in: query
+  required: false
+  type: array
+req_sharded:
+  description: |
+    When true, filter the list of returned Nodes, and only return the ones with
+    a non-null ``shard`` value. When false, the inverse filter is performed.
+  in: query
+  required: false
+  type: boolean
req_standalone_ports_supported:
description: |
Indicates whether ports that are members of this portgroup can be
@@ -1920,6 +1959,22 @@ secure_boot:
Indicates whether node is currently booted with secure_boot turned on.
in: body
type: boolean
+shard:
+ description: |
+ A string indicating the shard this node belongs to.
+ in: body
+ type: string
+shard_count:
+  description: |
+    The number of nodes currently assigned to this shard.
+  in: body
+  type: integer
+shard_name:
+ description: |
+ The name of the shard. A value of "None" indicates the count of nodes with
+ an empty shard value.
+ in: body
+ type: string
standalone_ports_supported:
description: |
Indicates whether ports that are members of this portgroup can be
diff --git a/api-ref/source/samples/node-inventory-response.json b/api-ref/source/samples/node-inventory-response.json
new file mode 100644
index 000000000..7916f6717
--- /dev/null
+++ b/api-ref/source/samples/node-inventory-response.json
@@ -0,0 +1,31 @@
+{
+ "inventory": {
+ "interfaces":[
+ {
+ "name":"eth0",
+ "mac_address":"52:54:00:90:35:d6",
+ "ipv4_address":"192.168.122.128",
+ "ipv6_address":"fe80::5054:ff:fe90:35d6%eth0",
+ "has_carrier":true,
+ "lldp":null,
+ "vendor":"0x1af4",
+ "product":"0x0001"
+ }
+ ],
+ "cpu":{
+ "model_name":"QEMU Virtual CPU version 2.5+",
+ "frequency":null,
+ "count":1,
+ "architecture":"x86_64"
+ }
+ },
+ "plugin_data":{
+ "macs":[
+ "52:54:00:90:35:d6"
+ ],
+ "local_gb":10,
+ "cpus":1,
+ "cpu_arch":"x86_64",
+ "memory_mb":2048
+ }
+}
diff --git a/api-ref/source/samples/shards-list-response.json b/api-ref/source/samples/shards-list-response.json
new file mode 100644
index 000000000..776dd324b
--- /dev/null
+++ b/api-ref/source/samples/shards-list-response.json
@@ -0,0 +1,12 @@
+{
+ "shards": [
+ {
+ "count": 47,
+      "name": "example_shard1"
+ },
+ {
+ "count": 46,
+ "name": "example_shard2"
+ }
+ ]
+}
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index ab96638c0..17ba547f1 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -586,7 +586,7 @@ TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-}
TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}
# TODO(TheJulia): This PHYSICAL_NETWORK needs to be refactored in
-# our devstack plugin. It is used by the neutron-legacy integration,
+# our devstack plugin. It is used by the neutron integration,
# however they want to name the new variable for the current neutron
# plugin NEUTRON_PHYSICAL_NETWORK. For now we'll do some magic and
# change it later once we migrate our jobs.
@@ -594,7 +594,7 @@ TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}
PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-${PHYSICAL_NETWORK:-}}
# Ramdisk ISO image for Ramdisk Virtual Media/iPXE testing
-IRONIC_RAMDISK_IMAGE=${IRONIC_RAMDISK_IMAGE:-http://tinycorelinux.net/10.x/x86/archive/10.0/Core-10.0.iso}
+IRONIC_RAMDISK_IMAGE=${IRONIC_RAMDISK_IMAGE:-http://tinycorelinux.net/13.x/x86/archive/13.0/Core-13.0.iso}
IRONIC_LOADER_PATHS=${IRONIC_LOADER_PATHS:-}
@@ -1332,6 +1332,17 @@ function configure_ironic_networks {
configure_ironic_cleaning_network
echo_summary "Configuring Ironic rescue network"
configure_ironic_rescue_network
+ echo_summary "Configuring Neutron Private Subnet, if needed."
+ configure_ironic_private_subnet
+}
+
+function configure_ironic_private_subnet {
+ if [[ "${IRONIC_ANACONDA_IMAGE_REF:-}" != "" ]]; then
+ # NOTE(TheJulia): Anaconda needs DNS for FQDN resolution
+ # and devstack doesn't create this network with dns.
+ subnet_id=$(openstack --os-cloud $OS_CLOUD subnet show private-subnet -f value -c id)
+ openstack --os-cloud $OS_CLOUD subnet set --dns-nameserver 8.8.8.8 $subnet_id
+ fi
}
function configure_ironic_cleaning_network {
@@ -1405,7 +1416,8 @@ function configure_ironic_provision_network {
${net_segment_id:+--network-segment $net_segment_id} \
$IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
--gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
- --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
+ --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX \
+ --dns-nameserver 8.8.8.8 -f value -c id)"
else
# NOTE(TheJulia): Consider changing this to stateful to support UEFI once we move
# CI to Ubuntu Jammy as it will support v6 and v4 UEFI firmware driven boot ops.
@@ -1637,15 +1649,9 @@ function configure_client_for {
# NOTE(TheJulia): Below are services which we know, as of late 2021, which support
# explicit scope based ops *and* have knobs.
- # Needed: Neutron, swift, nova ?service_catalog?
- # Neutron - https://review.opendev.org/c/openstack/devstack/+/797450
if [[ "$service_config_section" == "inspector" ]] && [[ "$IRONIC_INSPECTOR_ENFORCE_SCOPE" == "True" ]]; then
use_system_scope="True"
- elif [[ "$service_config_section" == "cinder" ]] && [[ "${CINDER_ENFORCE_SCOPE:-False}" == "True" ]]; then
- use_system_scope="True"
- elif [[ "$service_config_section" == "glance" ]] && [[ "${GLANCE_ENFORCE_SCOPE:-False}" == "True" ]]; then
- use_system_scope="True"
fi
if [[ "$use_system_scope" == "True" ]]; then
@@ -1912,6 +1918,11 @@ function init_ironic {
# NOTE(rloo): We're not upgrading but want to make sure this command works,
# even though we're not parsing the output of this command.
$IRONIC_BIN_DIR/ironic-status upgrade check
+
+ $IRONIC_BIN_DIR/ironic-status upgrade check && ret_val=$? || ret_val=$?
+ if [ $ret_val -gt 1 ] ; then
die $LINENO "The 'ironic-status upgrade check' command returned an error. Cannot proceed."
+ fi
}
# _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
@@ -2365,6 +2376,9 @@ function enroll_nodes {
local ironic_node_disk=$IRONIC_VM_SPECS_DISK
local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK
local ironic_node_arch=x86_64
+ if [[ ! -f $IRONIC_VM_MACS_CSV_FILE ]]; then
+ touch $IRONIC_VM_MACS_CSV_FILE
+ fi
local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE
if is_deployed_by_ipmi; then
@@ -2932,8 +2946,16 @@ function upload_baremetal_ironic_efiboot {
sudo mkdir -p $efiboot_mount/efi/boot
- sudo cp "$IRONIC_GRUB2_SHIM_FILE" $efiboot_mount/efi/boot/bootx64.efi
- sudo cp "$IRONIC_GRUB2_FILE" $efiboot_mount/efi/boot/grubx64.efi
+ if [[ "$IRONIC_GRUB2_SHIM_FILE" =~ "http".* ]]; then
+ sudo wget "$IRONIC_GRUB2_SHIM_FILE" -O $efiboot_mount/efi/boot/bootx64.efi
+ else
+ sudo cp "$IRONIC_GRUB2_SHIM_FILE" $efiboot_mount/efi/boot/bootx64.efi
+ fi
+ if [[ "$IRONIC_GRUB2_FILE" =~ "http".* ]]; then
+ sudo wget "$IRONIC_GRUB2_FILE" -O $efiboot_mount/efi/boot/grubx64.efi
+ else
+ sudo cp "$IRONIC_GRUB2_FILE" $efiboot_mount/efi/boot/grubx64.efi
+ fi
sudo umount $efiboot_mount
@@ -2970,7 +2992,7 @@ function upload_baremetal_ironic_efiboot {
# NOTE(dtantsur): this is likely incorrect
efi_grub_path=EFI/BOOT/grub.cfg
fi
- iniset $IRONIC_CONF_FILE DEFAULT grub_config_path $efi_grub_path
+ iniset $IRONIC_CONF_FILE DEFAULT grub_config_path ${IRONIC_GRUB2_CONFIG_PATH:-$efi_grub_path}
}
# build deploy kernel+ramdisk, then upload them to glance
@@ -3057,6 +3079,16 @@ function upload_baremetal_ironic_deploy {
iniset $IRONIC_CONF_FILE conductor deploy_ramdisk $IRONIC_DEPLOY_RAMDISK_ID
iniset $IRONIC_CONF_FILE conductor rescue_kernel $IRONIC_DEPLOY_KERNEL_ID
iniset $IRONIC_CONF_FILE conductor rescue_ramdisk $IRONIC_DEPLOY_RAMDISK_ID
+
+ if [[ "${IRONIC_ANACONDA_INSECURE_HEARTBEAT:-}" != "" ]]; then
+ iniset $IRONIC_CONF_FILE anaconda insecure_heartbeat ${IRONIC_ANACONDA_INSECURE_HEARTBEAT:-}
+ fi
+ # NOTE(TheJulia): Compared to an image deploy, anaconda is relatively
+ # slow as it installs packages one at a time. As such, we need an option
+ # to extend.
+ if [[ "${IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT:-}" != "" ]]; then
+ iniset $IRONIC_CONF_FILE conductor deploy_callback_timeout ${IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT:-}
+ fi
}
function prepare_baremetal_basic_ops {
@@ -3221,6 +3253,23 @@ function ironic_configure_tempest {
if [[ "$IRONIC_RAMDISK_IMAGE" != "" ]]; then
iniset $TEMPEST_CONFIG baremetal ramdisk_iso_image_ref "$IRONIC_RAMDISK_IMAGE"
fi
+ if [[ "${IRONIC_ANACONDA_IMAGE_REF:-}" != "" ]]; then
+ # In a perfect world we would use *just* the opendev repo
+ # mirror, and let things be magical, but OpenDev Infra cannot
+ # mirror the /images path with the limited storage space.
+ iniset $TEMPEST_CONFIG baremetal anaconda_image_ref ${IRONIC_ANACONDA_IMAGE_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_KERNEL_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_kernel_ref ${IRONIC_ANACONDA_KERNEL_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_RAMDISK_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_initial_ramdisk_ref ${IRONIC_ANACONDA_RAMDISK_REF:-}
+ fi
+ if [[ "${IRONIC_ANACONDA_STAGE2_REF:-}" != "" ]]; then
+ iniset $TEMPEST_CONFIG baremetal anaconda_stage2_ramdisk_ref ${IRONIC_ANACONDA_STAGE2_REF:-}
+
+ fi
+
# NOTE(dtantsur): keep this option here until the defaults change in
# ironic-tempest-plugin to disable classic drivers testing.
iniset $TEMPEST_CONFIG baremetal enabled_drivers ""
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index f49c63d38..306569f51 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -15,7 +15,7 @@ if is_service_enabled ir-api ir-cond; then
echo_summary "Installing Ironic"
if ! is_service_enabled nova; then
- source $RC_DIR/lib/nova_plugins/functions-libvirt
+ source $TOP_DIR/lib/nova_plugins/functions-libvirt
install_libvirt
fi
install_ironic
diff --git a/devstack/tools/ironic/scripts/cirros-partition.sh b/devstack/tools/ironic/scripts/cirros-partition.sh
index 40c87b19e..facf9b030 100755
--- a/devstack/tools/ironic/scripts/cirros-partition.sh
+++ b/devstack/tools/ironic/scripts/cirros-partition.sh
@@ -8,7 +8,7 @@ if [[ "$VERBOSE" == True ]]; then
guestfish_args="--verbose"
fi
-CIRROS_VERSION=${CIRROS_VERSION:-0.5.2}
+CIRROS_VERSION=${CIRROS_VERSION:-0.6.1}
CIRROS_ARCH=${CIRROS_ARCH:-x86_64}
# TODO(dtantsur): use the image cached on infra images in the CI
DISK_URL=http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index 7801ccd26..a3d51696a 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -40,7 +40,7 @@ set -o errexit
source $TARGET_DEVSTACK_DIR/stackrc
source $TARGET_DEVSTACK_DIR/lib/tls
source $TARGET_DEVSTACK_DIR/lib/nova
-source $TARGET_DEVSTACK_DIR/lib/neutron-legacy
+source $TARGET_DEVSTACK_DIR/lib/neutron
source $TARGET_DEVSTACK_DIR/lib/apache
source $TARGET_DEVSTACK_DIR/lib/keystone
diff --git a/doc/source/admin/anaconda-deploy-interface.rst b/doc/source/admin/anaconda-deploy-interface.rst
index 2c686506a..2b7195525 100644
--- a/doc/source/admin/anaconda-deploy-interface.rst
+++ b/doc/source/admin/anaconda-deploy-interface.rst
@@ -271,11 +271,44 @@ purposes.
``liveimg`` which is used as the base operating system image to
start with.
+Configuration Considerations
+----------------------------
+
+When using the ``anaconda`` deployment interface, some configuration
+parameters may need to be adjusted in your environment. This is in large
+part due to the general defaults being set to much lower values for image
+based deployments, but the way the anaconda deployment interface works,
+you may need to make some adjustments.
+
+* ``[conductor]deploy_callback_timeout`` likely needs to be adjusted
+ for most ``anaconda`` deployment interface users. By default this
+ is a timer which looks for "agents" which have not checked in with
+ Ironic, or agents which may have crashed or failed after they
+ started. If the value is reached, then the current operation is failed.
+ This value should be set to a number of seconds which exceeds your
+ average anaconda deployment time.
+* ``[pxe]boot_retry_timeout`` can also be triggered and result in
+ an anaconda deployment in progress getting reset as it is intended
+ to reboot nodes which might have failed their initial PXE operation.
+ Depending on sizes of images, and the exact nature of what was deployed,
+ it may be necessary to ensure this is a much higher value.
+
Limitations
-----------
-This deploy interface has only been tested with Red Hat based operating systems
-that use anaconda. Other systems are not supported.
+* This deploy interface has only been tested with Red Hat based operating
+ systems that use anaconda. Other systems are not supported.
+
+* Runtime TLS certificate injection into ramdisks is not supported. Assets
+ such as ``ramdisk`` or a ``stage2`` ramdisk image need to have trusted
+ Certificate Authority certificates present within the images *or* the
+ Ironic API endpoint utilized should utilize a known trusted Certificate
+ Authority.
+
+* The ``anaconda`` tooling deploying the instance/workload does not
+ heartbeat to Ironic like the ``ironic-python-agent`` driven ramdisks.
+ As such, you may need to adjust some timers. See
+ `Configuration Considerations`_ for some details on this.
.. _`anaconda`: https://fedoraproject.org/wiki/Anaconda
.. _`ks.cfg.template`: https://opendev.org/openstack/ironic/src/branch/master/ironic/drivers/modules/ks.cfg.template
diff --git a/doc/source/admin/drivers.rst b/doc/source/admin/drivers.rst
index c3d8eb377..f35cb2dfa 100644
--- a/doc/source/admin/drivers.rst
+++ b/doc/source/admin/drivers.rst
@@ -26,6 +26,7 @@ Hardware Types
drivers/redfish
drivers/snmp
drivers/xclarity
+ drivers/fake
Changing Hardware Types and Interfaces
--------------------------------------
diff --git a/doc/source/admin/drivers/fake.rst b/doc/source/admin/drivers/fake.rst
new file mode 100644
index 000000000..ea7d7ef4c
--- /dev/null
+++ b/doc/source/admin/drivers/fake.rst
@@ -0,0 +1,36 @@
+===========
+Fake driver
+===========
+
+Overview
+========
+
+The ``fake-hardware`` hardware type is what it claims to be: fake. Use of this
+type or the ``fake`` interfaces should be temporary or limited to
+non-production environments, as the ``fake`` interfaces do not perform any of
+the actions typically expected.
+
+The ``fake`` interfaces can be configured to be combined with any of the
+"real" hardware interfaces, allowing you to effectively disable one or more
+hardware interfaces for testing by simply setting that interface to
+``fake``.
+
+Use cases
+=========
+
+Development
+-----------
+Developers can use ``fake-hardware`` hardware-type to mock out nodes for
+testing without those nodes needing to exist with physical or virtual hardware.
+
+Adoption
+--------
+Some OpenStack deployers have used ``fake`` interfaces in Ironic to allow an
+adoption-style workflow with Nova. By setting a node's hardware interfaces to
+``fake``, it's possible to deploy to that node with Nova without causing any
+actual changes to the hardware or an OS already deployed on it.
+
+This is generally an unsupported use case, but it is possible. For more
+information, see the relevant `post from CERN TechBlog`_.
+
+.. _`post from CERN TechBlog`: https://techblog.web.cern.ch/techblog/post/ironic-nova-adoption/
diff --git a/doc/source/admin/drivers/ibmc.rst b/doc/source/admin/drivers/ibmc.rst
index 1bf9a3ba2..0f7fe1d90 100644
--- a/doc/source/admin/drivers/ibmc.rst
+++ b/doc/source/admin/drivers/ibmc.rst
@@ -312,6 +312,6 @@ boot_up_seq GET Query boot up sequence
get_raid_controller_list GET Query RAID controller summary info
======================== ============ ======================================
-.. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc
+.. _Huawei iBMC: https://e.huawei.com/en/products/computing/kunpeng/accessories/ibmc
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
.. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/
diff --git a/doc/source/admin/drivers/ilo.rst b/doc/source/admin/drivers/ilo.rst
index f764a6d89..b6825fc40 100644
--- a/doc/source/admin/drivers/ilo.rst
+++ b/doc/source/admin/drivers/ilo.rst
@@ -55,6 +55,8 @@ The hardware type ``ilo`` supports following HPE server features:
* `Updating security parameters as manual clean step`_
* `Update Minimum Password Length security parameter as manual clean step`_
* `Update Authentication Failure Logging security parameter as manual clean step`_
+* `Create Certificate Signing Request(CSR) as manual clean step`_
+* `Add HTTPS Certificate as manual clean step`_
* `Activating iLO Advanced license as manual clean step`_
* `Removing CA certificates from iLO as manual clean step`_
* `Firmware based UEFI iSCSI boot from volume support`_
@@ -65,6 +67,7 @@ The hardware type ``ilo`` supports following HPE server features:
* `BIOS configuration support`_
* `IPv6 support`_
* `Layer 3 or DHCP-less ramdisk booting`_
+* `Events subscription`_
Apart from above features hardware type ``ilo5`` also supports following
features:
@@ -200,6 +203,18 @@ The ``ilo`` hardware type supports following hardware interfaces:
enabled_hardware_types = ilo
enabled_rescue_interfaces = agent,no-rescue
+* vendor
+ Supports ``ilo``, ``ilo-redfish`` and ``no-vendor``. The default is
+ ``ilo``. They can be enabled by using the
+ ``[DEFAULT]enabled_vendor_interfaces`` option in ``ironic.conf`` as given
+ below:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ enabled_hardware_types = ilo
+ enabled_vendor_interfaces = ilo,ilo-redfish,no-vendor
+
The ``ilo5`` hardware type supports all the ``ilo`` interfaces described above,
except for ``boot`` and ``raid`` interfaces. The details of ``boot`` and
@@ -751,6 +766,12 @@ Supported **Manual** Cleaning Operations
``update_auth_failure_logging_threshold``:
Updates the Authentication Failure Logging security parameter. See
`Update Authentication Failure Logging security parameter as manual clean step`_ for user guidance on usage.
+ ``create_csr``:
+ Creates the certificate signing request. See `Create Certificate Signing Request(CSR) as manual clean step`_
+ for user guidance on usage.
+ ``add_https_certificate``:
+ Adds the signed HTTPS certificate to the iLO. See `Add HTTPS Certificate as manual clean step`_ for user
+ guidance on usage.
* iLO with firmware version 1.5 is minimally required to support all the
operations.
@@ -1648,6 +1669,54 @@ Both the arguments ``logging_threshold`` and ``ignore`` are optional. The accept
value be False. If user passes the value of logging_threshold as 0, the Authentication Failure Logging security
parameter will be disabled.
+Create Certificate Signing Request(CSR) as manual clean step
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+iLO driver can invoke ``create_csr`` request as a manual clean step. This step is only supported for iLO5 based hardware.
+
+An example of a manual clean step with ``create_csr`` as the only clean step could be::
+
+ "clean_steps": [{
+ "interface": "management",
+ "step": "create_csr",
+ "args": {
+ "csr_params": {
+ "City": "Bengaluru",
+ "CommonName": "1.1.1.1",
+ "Country": "India",
+ "OrgName": "HPE",
+ "State": "Karnataka"
+ }
+ }
+ }]
+
+The ``[ilo]cert_path`` option in ``ironic.conf`` is used as the directory path for
+creating the CSR, which defaults to ``/var/lib/ironic/ilo``. The CSR is created in the directory location
+given in ``[ilo]cert_path`` in ``node_uuid`` directory as <node_uuid>.csr.
+
+
+Add HTTPS Certificate as manual clean step
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+iLO driver can invoke ``add_https_certificate`` request as a manual clean step. This step is only supported for
+iLO5 based hardware.
+
+An example of a manual clean step with ``add_https_certificate`` as the only clean step could be::
+
+ "clean_steps": [{
+ "interface": "management",
+ "step": "add_https_certificate",
+ "args": {
+ "cert_file": "/test1/iLO.crt"
+ }
+ }]
+
+Argument ``cert_file`` is mandatory. The ``cert_file`` takes the path or url of the certificate file.
+The url schemes supported are: ``file``, ``http`` and ``https``.
+The CSR generated in step ``create_csr`` needs to be signed by a valid CA and the resultant HTTPS certificate should
+be provided in ``cert_file``. It copies the ``cert_file`` to ``[ilo]cert_path`` under ``node.uuid`` as <node_uuid>.crt
+before adding it to iLO.
+
RAID Support
^^^^^^^^^^^^
@@ -2136,6 +2205,20 @@ DHCP-less deploy is supported by ``ilo`` and ``ilo5`` hardware types.
However it would work only with ilo-virtual-media boot interface. See
:doc:`/admin/dhcp-less` for more information.
+Events subscription
+^^^^^^^^^^^^^^^^^^^
+Events subscription is supported by ``ilo`` and ``ilo5`` hardware types with
+``ilo`` vendor interface for Gen10 and Gen10 Plus servers. See
+:ref:`node-vendor-passthru-methods` for more information.
+
+Anaconda based deployment
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Deployment with ``anaconda`` deploy interface is supported by ``ilo`` and
+``ilo5`` hardware type and works with ``ilo-pxe`` and ``ilo-ipxe``
+boot interfaces. See :doc:`/admin/anaconda-deploy-interface` for
+more information.
+
+
.. _`ssacli documentation`: https://support.hpe.com/hpsc/doc/public/display?docId=c03909334
.. _`proliant-tools`: https://docs.openstack.org/diskimage-builder/latest/elements/proliant-tools/README.html
.. _`HPE iLO4 User Guide`: https://h20566.www2.hpe.com/hpsc/doc/public/display?docId=c03334051
diff --git a/doc/source/admin/drivers/irmc.rst b/doc/source/admin/drivers/irmc.rst
index 17b8d8644..9ddfa3b3d 100644
--- a/doc/source/admin/drivers/irmc.rst
+++ b/doc/source/admin/drivers/irmc.rst
@@ -123,11 +123,29 @@ Configuration via ``driver_info``
the iRMC with administrator privileges.
- ``driver_info/irmc_password`` property to be ``password`` for
irmc_username.
- - ``properties/capabilities`` property to be ``boot_mode:uefi`` if
- UEFI boot is required.
- - ``properties/capabilities`` property to be ``secure_boot:true`` if
- UEFI Secure Boot is required. Please refer to `UEFI Secure Boot Support`_
- for more information.
+
+ .. note::
+ Fujitsu server equipped with iRMC S6 2.00 or later version of firmware
+ disables IPMI over LAN by default. However user may be able to enable IPMI
+ via BMC settings.
+ To handle this change, ``irmc`` hardware type first tries IPMI and,
+ if IPMI operation fails, ``irmc`` hardware type uses Redfish API of Fujitsu
+ server to provide Ironic functionalities.
+ So if user deploys Fujitsu server with iRMC S6 2.00 or later, user needs
+ to set Redfish related parameters in ``driver_info``.
+
+  - ``driver_info/redfish_address`` property to be ``IP address`` or
+ ``hostname`` of the iRMC. You can prefix it with protocol (e.g.
+ ``https://``). If you don't provide protocol, Ironic assumes HTTPS
+ (i.e. add ``https://`` prefix).
+ iRMC with S6 2.00 or later only support HTTPS connection to Redfish API.
+ - ``driver_info/redfish_username`` to be user name of iRMC with administrative
+ privileges
+ - ``driver_info/redfish_password`` to be password of ``redfish_username``
+ - ``driver_info/redfish_verify_ca`` accepts values those accepted in
+ ``driver_info/irmc_verify_ca``
+ - ``driver_info/redfish_auth_type`` to be one of ``basic``, ``session`` or
+ ``auto``
* If ``port`` in ``[irmc]`` section of ``/etc/ironic/ironic.conf`` or
``driver_info/irmc_port`` is set to 443, ``driver_info/irmc_verify_ca``
@@ -191,6 +209,22 @@ Configuration via ``driver_info``
- ``driver_info/irmc_snmp_priv_password`` property to be the privacy protocol
pass phrase. The length of pass phrase should be at least 8 characters.
+
+Configuration via ``properties``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Each node is configured for ``irmc`` hardware type by setting the following
+ ironic node object's properties:
+
+ - ``properties/capabilities`` property to be ``boot_mode:uefi`` if
+ UEFI boot is required, or ``boot_mode:bios`` if Legacy BIOS is required.
+ If this is not set, ``default_boot_mode`` at ``[default]`` section in
+ ``ironic.conf`` will be used.
+ - ``properties/capabilities`` property to be ``secure_boot:true`` if
+ UEFI Secure Boot is required. Please refer to `UEFI Secure Boot Support`_
+ for more information.
+
+
Configuration via ``ironic.conf``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -199,6 +233,25 @@ Configuration via ``ironic.conf``
- ``port``: Port to be used for iRMC operations; either 80
or 443. The default value is 443. Optional.
+
+ .. note::
+ Since iRMC S6 2.00, iRMC firmware doesn't support HTTP connection to
+      REST API. If you deploy server with iRMC S6 2.00 and later, please
+ set ``port`` to 443.
+
+ ``irmc`` hardware type provides ``verify_step`` named
+ ``verify_http_https_connection_and_fw_version`` to check HTTP(S)
+ connection to iRMC REST API. If HTTP(S) connection is successfully
+ established, then it fetches and caches iRMC firmware version.
+ If HTTP(S) connection to iRMC REST API failed, Ironic node's state
+ moves to ``enroll`` with suggestion put in log message.
+ Default priority of this verify step is 10.
+
+ If operator updates iRMC firmware version of node, operator should
+ run ``cache_irmc_firmware_version`` node vendor passthru method
+ to update iRMC firmware version stored in
+ ``driver_internal_info/irmc_fw_version``.
+
- ``auth_method``: Authentication method for iRMC operations;
either ``basic`` or ``digest``. The default value is ``basic``. Optional.
- ``client_timeout``: Timeout (in seconds) for iRMC
@@ -229,9 +282,10 @@ Configuration via ``ironic.conf``
and ``v2c``. The default value is ``public``. Optional.
- ``snmp_security``: SNMP security name required for version ``v3``.
Optional.
- - ``snmp_auth_proto``: The SNMPv3 auth protocol. The valid value and the
- default value are both ``sha``. We will add more supported valid values
- in the future. Optional.
+ - ``snmp_auth_proto``: The SNMPv3 auth protocol. If using iRMC S4 or S5, the
+ valid value of this option is only ``sha``. If using iRMC S6, the valid
+ values are ``sha256``, ``sha384`` and ``sha512``. The default value is
+ ``sha``. Optional.
- ``snmp_priv_proto``: The SNMPv3 privacy protocol. The valid value and
the default value are both ``aes``. We will add more supported valid values
in the future. Optional.
diff --git a/doc/source/admin/drivers/redfish.rst b/doc/source/admin/drivers/redfish.rst
index dd19f8bde..063dd1fe5 100644
--- a/doc/source/admin/drivers/redfish.rst
+++ b/doc/source/admin/drivers/redfish.rst
@@ -87,8 +87,18 @@ field:
The "auto" mode first tries "session" and falls back
to "basic" if session authentication is not supported
by the Redfish BMC. Default is set in ironic config
- as ``[redfish]auth_type``.
+ as ``[redfish]auth_type``. Most operators should not
+ need to leverage this setting. Session based
+ authentication should generally be used in most
+ cases as it prevents re-authentication every time
+ a background task checks in with the BMC.
+.. note::
+ The ``redfish_address``, ``redfish_username``, ``redfish_password``,
+ and ``redfish_verify_ca`` fields, if changed, will trigger a new session
+   to be established and cached with the BMC. The ``redfish_auth_type`` field
+ will only be used for the creation of a new cached session, or should
+ one be rejected by the BMC.
The ``baremetal node create`` command can be used to enroll
a node with the ``redfish`` driver. For example:
@@ -533,6 +543,8 @@ settings. The following fields will be returned in the BIOS API
"``unique``", "The setting is specific to this node"
"``reset_required``", "After changing this setting a node reboot is required"
+.. _node-vendor-passthru-methods:
+
Node Vendor Passthru Methods
============================
@@ -620,6 +632,75 @@ Eject Virtual Media
"boot_device (optional)", "body", "string", "Type of the device to eject (all devices by default)"
+Internal Session Cache
+======================
+
+The ``redfish`` hardware type, and derived interfaces, utilizes a built-in
+session cache which prevents Ironic from re-authenticating every time
+Ironic attempts to connect to the BMC for any reason.
+
+This consists of cached connectors objects which are used and tracked by
+a unique consideration of ``redfish_username``, ``redfish_password``,
+``redfish_verify_ca``, and finally ``redfish_address``. Changing any one
+of those values will trigger a new session to be created.
+The ``redfish_system_id`` value is explicitly not considered as Redfish
+has a model of use of one BMC to many systems, which is also a model
+Ironic supports.
+
+The session cache default size is ``1000`` sessions per conductor.
+If you are operating a deployment with a larger number of Redfish
+BMCs, it is advised that you do appropriately tune that number.
+This can be tuned via the API service configuration file,
+``[redfish]connection_cache_size``.
+
+Session Cache Expiration
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, sessions remain cached for as long as possible in
+memory, as long as they have not experienced an authentication,
+connection, or other unexplained error.
+
+Under normal circumstances, the sessions will only be rolled out
+of the cache in order of oldest first when the cache becomes full.
+There is no time based expiration to entries in the session cache.
+
+Of course, the cache is only in memory, and restarting the
+``ironic-conductor`` will also cause the cache to be rebuilt
+from scratch. If this is due to any persistent connectivity issue,
+this may be sign of an unexpected condition, and please consider
+contacting the Ironic developer community for assistance.
+
+Redfish Interoperability Profile
+================================
+
+Ironic projects provides Redfish Interoperability Profile located in
+``redfish-interop-profiles`` folder at source code root. The Redfish
+Interoperability Profile is a JSON document written in a particular format
+that serves two purposes.
+
+* It enables the creation of a human-readable document that merges the
+ profile requirements with the Redfish schema into a single document
+ for developers or users.
+* It allows a conformance test utility to test a Redfish Service
+ implementation for conformance with the profile.
+
+The JSON document structure is intended to align easily with JSON payloads
+retrieved from Redfish Service implementations, to allow for easy comparisons
+and conformance testing. Many of the properties defined within this structure
+have assumed default values that correspond with the most common use case, so
+that those properties can be omitted from the document for brevity.
+
+Validation of Profiles using DMTF tool
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An open source utility has been created by the Redfish Forum to verify that
+a Redfish Service implementation conforms to the requirements included in a
+Redfish Interoperability Profile. The Redfish Interop Validator is available
+for download from the DMTF's organization on Github at
+https://github.com/DMTF/Redfish-Interop-Validator. Refer to instructions in
+README on how to configure and run validation.
+
+
.. _Redfish: http://redfish.dmtf.org/
.. _Sushy: https://opendev.org/openstack/sushy
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
diff --git a/doc/source/admin/drivers/snmp.rst b/doc/source/admin/drivers/snmp.rst
index 1c402ab9b..eed4ed794 100644
--- a/doc/source/admin/drivers/snmp.rst
+++ b/doc/source/admin/drivers/snmp.rst
@@ -22,39 +22,47 @@ this table could possibly work using a similar driver.
Please report any device status.
-============== ========== ========== =====================
-Manufacturer Model Supported? Driver name
-============== ========== ========== =====================
-APC AP7920 Yes apc_masterswitch
-APC AP9606 Yes apc_masterswitch
-APC AP9225 Yes apc_masterswitchplus
-APC AP7155 Yes apc_rackpdu
-APC AP7900 Yes apc_rackpdu
-APC AP7901 Yes apc_rackpdu
-APC AP7902 Yes apc_rackpdu
-APC AP7911a Yes apc_rackpdu
-APC AP7921 Yes apc_rackpdu
-APC AP7922 Yes apc_rackpdu
-APC AP7930 Yes apc_rackpdu
-APC AP7931 Yes apc_rackpdu
-APC AP7932 Yes apc_rackpdu
-APC AP7940 Yes apc_rackpdu
-APC AP7941 Yes apc_rackpdu
-APC AP7951 Yes apc_rackpdu
-APC AP7960 Yes apc_rackpdu
-APC AP7990 Yes apc_rackpdu
-APC AP7998 Yes apc_rackpdu
-APC AP8941 Yes apc_rackpdu
-APC AP8953 Yes apc_rackpdu
-APC AP8959 Yes apc_rackpdu
-APC AP8961 Yes apc_rackpdu
-APC AP8965 Yes apc_rackpdu
-Aten all? Yes aten
-CyberPower all? Untested cyberpower
-EatonPower all? Untested eatonpower
-Teltronix all? Yes teltronix
-BayTech MRP27 Yes baytech_mrp27
-============== ========== ========== =====================
+============== ============== ========== =====================
+Manufacturer Model Supported? Driver name
+============== ============== ========== =====================
+APC AP7920 Yes apc_masterswitch
+APC AP9606 Yes apc_masterswitch
+APC AP9225 Yes apc_masterswitchplus
+APC AP7155 Yes apc_rackpdu
+APC AP7900 Yes apc_rackpdu
+APC AP7901 Yes apc_rackpdu
+APC AP7902 Yes apc_rackpdu
+APC AP7911a Yes apc_rackpdu
+APC AP7921 Yes apc_rackpdu
+APC AP7922 Yes apc_rackpdu
+APC AP7930 Yes apc_rackpdu
+APC AP7931 Yes apc_rackpdu
+APC AP7932 Yes apc_rackpdu
+APC AP7940 Yes apc_rackpdu
+APC AP7941 Yes apc_rackpdu
+APC AP7951 Yes apc_rackpdu
+APC AP7960 Yes apc_rackpdu
+APC AP7990 Yes apc_rackpdu
+APC AP7998 Yes apc_rackpdu
+APC AP8941 Yes apc_rackpdu
+APC AP8953 Yes apc_rackpdu
+APC AP8959 Yes apc_rackpdu
+APC AP8961 Yes apc_rackpdu
+APC AP8965 Yes apc_rackpdu
+Aten all? Yes aten
+CyberPower all? Untested cyberpower
+EatonPower all? Untested eatonpower
+Teltronix all? Yes teltronix
+BayTech MRP27 Yes baytech_mrp27
+Raritan PX3-5547V-V2 Yes raritan_pdu2
+Raritan PX3-5726V Yes raritan_pdu2
+Raritan PX3-5776U-N2 Yes raritan_pdu2
+Raritan PX3-5969U-V2 Yes raritan_pdu2
+Raritan PX3-5961I2U-V2 Yes raritan_pdu2
+Vertiv NU30212 Yes vertivgeist_pdu
+ServerTech CW-16VE-P32M Yes servertech_sentry3
+ServerTech C2WG24SN Yes servertech_sentry4
+============== ============== ========== =====================
Software Requirements
diff --git a/doc/source/admin/hardware-burn-in.rst b/doc/source/admin/hardware-burn-in.rst
index 503664182..35f231d11 100644
--- a/doc/source/admin/hardware-burn-in.rst
+++ b/doc/source/admin/hardware-burn-in.rst
@@ -108,6 +108,13 @@ Then launch the test with:
baremetal node clean --clean-steps '[{"step": "burnin_disk", \
"interface": "deploy"}]' $NODE_NAME_OR_UUID
+In order to launch a parallel SMART self test on all devices after the
+disk burn-in (which will fail the step if any of the tests fail), set:
+
+.. code-block:: console
+
+ baremetal node set --driver-info agent_burnin_fio_disk_smart_test=True \
+ $NODE_NAME_OR_UUID
Network burn-in
===============
diff --git a/doc/source/admin/metrics.rst b/doc/source/admin/metrics.rst
index f435a50c5..733c6569b 100644
--- a/doc/source/admin/metrics.rst
+++ b/doc/source/admin/metrics.rst
@@ -17,8 +17,11 @@ These performance measurements, herein referred to as "metrics", can be
emitted from the Bare Metal service, including ironic-api, ironic-conductor,
and ironic-python-agent. By default, none of the services will emit metrics.
-Configuring the Bare Metal Service to Enable Metrics
-====================================================
+It is important to stress that not only statsd is supported for metrics
+collection and transmission. This is covered later on in our documentation.
+
+Configuring the Bare Metal Service to Enable Metrics with Statsd
+================================================================
Enabling metrics in ironic-api and ironic-conductor
---------------------------------------------------
@@ -62,6 +65,30 @@ in the ironic configuration file as well::
agent_statsd_host = 198.51.100.2
agent_statsd_port = 8125
+.. Note::
+ Use of a different metrics backend with the agent is not presently
+ supported.
+
+Transmission to the Message Bus Notifier
+========================================
+
+Regardless if you're using Ceilometer,
+`ironic-prometheus-exporter <https://docs.openstack.org/ironic-prometheus-exporter/latest/>`_,
+or some scripting you wrote to consume the message bus notifications,
+metrics data can be sent to the message bus notifier from the timer methods
+*and* additional gauge counters by utilizing the ``[metrics]backend``
+configuration option and setting it to ``collector``. When this is the case,
+information is cached locally and periodically sent along with the general sensor
+data update to the messaging notifier, which can be consumed off of the message bus,
+or via notifier plugin (such as is done with ironic-prometheus-exporter).
+
+.. NOTE::
+ Transmission of timer data only works for the Conductor or ``single-process``
+ Ironic service model. A separate webserver process presently does not have
+ the capability of triggering the call to retrieve and transmit the data.
+
+.. NOTE::
+ This functionality requires ironic-lib version 5.4.0 to be installed.
Types of Metrics Emitted
========================
@@ -79,6 +106,9 @@ additional load before enabling metrics. To see which metrics have changed names
or have been removed between releases, refer to the `ironic release notes
<https://docs.openstack.org/releasenotes/ironic/>`_.
+Additional conductor metrics in the form of counts will also be generated in
+limited locations where pertinent to the activity of the conductor.
+
.. note::
With the default statsd configuration, each timing metric may create
additional metrics due to how statsd handles timing metrics. For more
diff --git a/doc/source/admin/retirement.rst b/doc/source/admin/retirement.rst
index e4884e0f4..aab307bac 100644
--- a/doc/source/admin/retirement.rst
+++ b/doc/source/admin/retirement.rst
@@ -23,6 +23,27 @@ scheduling of instances, but will still allow for other operations,
such as cleaning, to happen (this marks an important difference to
nodes which have the ``maintenance`` flag set).
+Requirements
+============
+
+The use of the retirement feature requires that automated cleaning
+be enabled. The default ``[conductor]automated_clean`` setting must
+not be disabled as the retirement feature is only engaged upon
+the completion of cleaning as it sets forth the expectation of removing
+sensitive data from a node.
+
+If you're uncomfortable with full cleaning, but want to make use of
+the retirement feature, a compromise may be to explore use of metadata
+erasure, however this will leave additional data on disk which you may
+wish to erase completely. Please consult the configuration for the
+``[deploy]erase_devices_metadata_priority`` and
+``[deploy]erase_devices_priority`` settings, and do note that
+clean steps can be manually invoked through manual cleaning should you
+wish to trigger the ``erase_devices`` clean step to completely wipe
+all data from storage devices. Alternatively, automated cleaning can
+also be enabled on an individual node level using the
+``baremetal node set --automated-clean <node_id>`` command.
+
How to use
==========
diff --git a/doc/source/admin/secure-rbac.rst b/doc/source/admin/secure-rbac.rst
index 639cfcb23..1f1bb66d1 100644
--- a/doc/source/admin/secure-rbac.rst
+++ b/doc/source/admin/secure-rbac.rst
@@ -267,3 +267,43 @@ restrictive and an ``owner`` may revoke access to ``lessee``.
Access to the underlying baremetal node is not exclusive between the
``owner`` and ``lessee``, and this use model expects that some level of
communication takes place between the appropriate parties.
+
+Can I, a project admin, create a node?
+--------------------------------------
+
+Starting in API version ``1.80``, the capability was added
+to allow users with an ``admin`` role to be able to create and
+delete their own nodes in Ironic.
+
+This functionality is enabled by default, and automatically
+imparts ``owner`` privileges to the created Bare Metal node.
+
+This functionality can be disabled by setting
+``[api]project_admin_can_manage_own_nodes`` to ``False``.
+
+Can I use a service role?
+-------------------------
+
+In later versions of Ironic, the ``service`` role has been added to enable
+delineation of accounts and access to Ironic's API. As Ironic's API was
+largely originally intended as an "admin" API service, the service role
+enables similar levels of access as a project-scoped user with the
+``admin`` or ``manager`` roles.
+
+In terms of access, this is likely best viewed as a user with the
+``manager`` role, but with slight elevation in privilege to enable
+usage of the service via a service account.
+
+A project scoped user with the ``service`` role is able to create
+baremetal nodes, but is not able to delete them. To disable the
+ability to create nodes, set the
+``[api]project_admin_can_manage_own_nodes`` setting to ``False``.
+The nodes which can be accessed/managed in the project scope also align
+with the ``owner`` and ``lessee`` access model, and thus if nodes are not
+matching the user's ``project_id``, then Ironic's API will appear not to
+have any enrolled baremetal nodes.
+
+With the system scope, a user with the ``service`` role is able to
+create baremetal nodes, but also, not delete them. The access rights
+are modeled such that an ``admin`` scoped user is needed to delete baremetal
+nodes from Ironic.
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index fa04d3006..72e969b6e 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -973,3 +973,174 @@ Unfortunately, due to the way the conductor is designed, it is not possible to
gracefully break a stuck lock held in ``*-ing`` states. As the last resort, you
may need to restart the affected conductor. See `Why are my nodes stuck in a
"-ing" state?`_.
+
+What is ConcurrentActionLimit?
+==============================
+
+ConcurrentActionLimit is an exception which is raised to clients when an
+operation is requested, but cannot be serviced at that moment because the
+overall threshold of nodes in concurrent "Deployment" or "Cleaning"
+operations has been reached.
+
+These limits exist for two distinct reasons.
+
+The first is they allow an operator to tune a deployment such that too many
+concurrent deployments cannot be triggered at any given time, as a single
+conductor has an internal limit to the number of overall concurrent tasks,
+this restricts only the number of running concurrent actions. As such, this
+accounts for the number of nodes in ``deploy`` and ``deploy wait`` states.
+In the case of deployments, the default value is relatively high and should
+be suitable for *most* larger operators.
+
+The second is to help slow down the ability in which an entire population of
+baremetal nodes can be moved into and through cleaning, in order to help
+guard against authenticated malicious users, or accidental script driven
+operations. In this case, the total number of nodes in ``deleting``,
+``cleaning``, and ``clean wait`` are evaluated. The default maximum limit
+for cleaning operations is *50* and should be suitable for the majority of
+baremetal operators.
+
+These settings can be modified by using the
+``[conductor]max_concurrent_deploy`` and ``[conductor]max_concurrent_clean``
+settings from the ironic.conf file supporting the ``ironic-conductor``
+service. Neither setting can be explicitly disabled, however there is also
+no upper limit to the setting.
+
+.. note::
+ This was an infrastructure operator requested feature from actual lessons
+ learned in the operation of Ironic in large scale production. The defaults
+ may not be suitable for the largest scale operators.
+
+Why do I have an error that an NVMe Partition is not a block device?
+====================================================================
+
+In some cases, you can encounter an error suggesting that a partition
+created on an NVMe block device is not a block device.
+
+Example::
+
+ lsblk: /dev/nvme0n1p2: not a block device
+
+What has happened is the partition contains a partition table inside of it
+which is confusing the NVMe device interaction. While basically valid in
+some cases to have nested partition tables, for example, with software
+raid, in the NVMe case the driver and possibly the underlying device gets
+quite confused. This is in part because partitions in NVMe devices are
+higher-level abstractions.
+
+The way this occurs is you likely had a ``whole-disk`` image, and it was
+configured as a partition image. If using glance, your image properties
+may have a ``img_type`` field, which should be ``whole-disk``, or you
+have a ``kernel_id`` and ``ramdisk_id`` value in the glance image
+``properties`` field. Definition of a kernel and ramdisk value also
+indicates that the image is of a ``partition`` image type. This is because
+a ``whole-disk`` image is bootable from the contents within the image,
+and partition images are unable to be booted without a kernel, and ramdisk.
+
+If you are using Ironic in standalone mode, it may be advisable to check
+the optional ``instance_info/image_type`` setting.
+Very similar to Glance usage above, if you have set Ironic's node level
+``instance_info/kernel`` and ``instance_info/ramdisk`` parameters, Ironic
+will proceed with deploying an image as if it is a partition image, and
+create a partition table on the new block device, and then write the
+contents of the image into the newly created partition.
+
+.. NOTE::
+ As a general reminder, the Ironic community recommends the use of
+ whole disk images over the use of partition images.
+
+Why can't I use Secure Erase/Wipe with RAID controllers?
+========================================================
+
+Situations have been reported where an infrastructure operator is expecting
+particular device types to be Secure Erased or Wiped when they are behind a
+RAID controller.
+
+For example, the server may have NVMe devices attached to a RAID controller
+which could be in pass-through or single disk volume mode. The same scenario
+exists basically regardless of the disk/storage medium/type.
+
+The basic reason why is that RAID controllers essentially act as command
+translators with a buffer cache. They tend to offer a simplified protocol
+to the Operating System, and interact with the storage device in whatever
+protocol is native to the device. This is the root of the underlying
+problem.
+
+Protocols such as SCSI are rooted in quite a bit of computing history,
+but never evolved to include primitives like Secure Erase which evolved in
+the `ATA protocol <https://en.wikipedia.org/wiki/Parallel_ATA#HDD_passwords_and_security>`_.
+
+The closest primitives in SCSI to ATA Secure Erase is the ``FORMAT UNIT``
+and ``UNMAP`` commands.
+
+``FORMAT UNIT`` might be a viable solution, and a tool named
+`sg_format <https://linux.die.net/man/8/sg_format>`_ exists,
+but there has not been a sufficient call upstream to implement this and
+test it sufficiently that the Ironic community would be comfortable
+shipping such a capability. The possibility also exists that a RAID
+controller might not translate this command through to an end device,
+just as some RAID controllers know how to handle and pass through
+ATA commands to disk devices which support them. It is entirely dependent
+upon the hardware configuration scenario.
+
+The ``UNMAP`` command is similar to the ATA ``TRIM`` command. Unfortunately
+the SCSI protocol requires this be performed at block level, and similar to
+``FORMAT UNIT``, it may not be supported or just passed through.
+
+If you are interested in working on this area, or are willing to help test,
+please feel free to contact the
+:doc:`Ironic development community </contributor/community>`.
+An additional option is the creation of your own
+`custom Hardware Manager <https://opendev.org/openstack/ironic-python-agent/src/branch/master/examples/custom-disk-erase>`_
+which can contain your preferred logic, however this does require some Python
+development experience.
+
+One last item of note, depending on the RAID controller, the BMC, and a number
+of other variables, you may be able to leverage the `RAID <raid>`_
+configuration interface to delete volumes/disks, and recreate them. This may
+have the same effect as a clean disk, however that too is RAID controller
+dependent behavior.
+
+I'm in "clean failed" state, what do I do?
+==========================================
+
+There is only one way to exit the ``clean failed`` state. But before we visit
+the answer as to **how**, we need to stress the importance of attempting to
+understand **why** cleaning failed. On the simple side of things, this may
+just be a DHCP failure, but on the complex side of things, it could be that
+a cleaning action failed against the underlying hardware, possibly due to
+a hardware failure.
+
+As such, we encourage everyone to attempt to understand **why** before exiting
+the ``clean failed`` state, because you could potentially make things worse
+for yourself. For example if firmware updates were being performed, you may
+need to perform a rollback operation against the physical server, depending on
+what, and how the firmware was being updated. Unfortunately this also borders
+the territory of "no simple answer".
+
+Counterbalancing this, sometimes there is simply a transient networking
+failure and a DHCP address was not obtained. An example of this would be
+suggested by the ``last_error`` field indicating something about "Timeout
+reached while cleaning the node", however we recommend following several
+basic troubleshooting steps:
+
+* Consult the ``last_error`` field on the node, utilizing the
+ ``baremetal node show <uuid>`` command.
+* If the version of ironic supports the feature, consult the node history
+ log, ``baremetal node history list`` and
+ ``baremetal node history get <uuid>``.
+* Consult the actual console screen of the physical machine. *If* the ramdisk
+ booted, you will generally want to investigate the controller logs and see
+ if an uploaded agent log is being stored on the conductor responsible for
+ the baremetal node. Consult `Retrieving logs from the deploy ramdisk`_.
+ If the node did not boot for some reason, you can typically just retry
+ at this point and move on.
+
+How to get out of the state, once you've understood **why** you reached it
+in the first place, is to utilize the ``baremetal node manage <node_id>``
+command. This returns the node to ``manageable`` state, from where you can
+retry "cleaning" through automated cleaning with the ``provide`` command,
+or manual cleaning with the ``clean`` command, or the next appropriate
+action in the workflow process you are attempting to follow, which may
+ultimately be decommissioning the node because it could have failed and is
+being removed or replaced.
diff --git a/doc/source/contributor/dev-quickstart.rst b/doc/source/contributor/dev-quickstart.rst
index 3fe03f02b..6f63104f9 100644
--- a/doc/source/contributor/dev-quickstart.rst
+++ b/doc/source/contributor/dev-quickstart.rst
@@ -131,6 +131,13 @@ The unit tests need a local database setup, you can use
``tools/test-setup.sh`` to set up the database the same way as setup
in the OpenStack test systems.
+.. note::
+ If you encounter issues executing unit tests, specifically where errors
+ may indicate that a field is too long, check your database's default
+ character encoding. Debian specifically sets MariaDB to ``utf8mb4``
+ which utilizes 4 byte encoded unicode characters by default, and is
+ incompatible with Ironic.
+
Additional Tox Targets
----------------------
diff --git a/doc/source/contributor/ironic-boot-from-volume.rst b/doc/source/contributor/ironic-boot-from-volume.rst
index fc3fd1c2b..673a189be 100644
--- a/doc/source/contributor/ironic-boot-from-volume.rst
+++ b/doc/source/contributor/ironic-boot-from-volume.rst
@@ -125,7 +125,8 @@ You can also run an integration test that an instance is booted from a remote
volume with tempest in the environment::
cd /opt/stack/tempest
- tox -e all-plugin -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume
+ tox -e venv-tempest -- pip install (path to the ironic-tempest-plugin directory)
+ tox -e all -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume
Please note that the storage interface will only indicate errors based upon
the state of the node and the configuration present. As such a node does not
diff --git a/doc/source/contributor/releasing.rst b/doc/source/contributor/releasing.rst
index eab0649c6..a83bdc2bd 100644
--- a/doc/source/contributor/releasing.rst
+++ b/doc/source/contributor/releasing.rst
@@ -5,7 +5,7 @@ Releasing Ironic Projects
Since the responsibility for releases will move between people, we document
that process here.
-A full list of projects that ironic manages is available in the `governance
+A full list of projects that Ironic manages is available in the `governance
site`_.
.. _`governance site`: https://governance.openstack.org/reference/projects/ironic.html
@@ -33,7 +33,7 @@ documented in the `Project Team Guide`_.
What do we have to release?
===========================
-The ironic project has a number of deliverables under its governance. The
+The Ironic project has a number of deliverables under its governance. The
ultimate source of truth for this is `projects.yaml
<https://opendev.org/openstack/governance/src/branch/master/reference/projects.yaml>`__
in the governance repository. These deliverables have varying release models,
@@ -41,7 +41,7 @@ and these are defined in the `deliverables YAML files
<https://opendev.org/openstack/releases/src/branch/master/deliverables>`__ in
the releases repository.
-In general, ironic deliverables follow the `cycle-with-intermediary
+In general, Ironic deliverables follow the `cycle-with-intermediary
<https://releases.openstack.org/reference/release_models.html#cycle-with-intermediary>`__
release model.
@@ -125,26 +125,30 @@ openstack ``stable/NAME`` branches:
* ironic-inspector
* ironic-python-agent
-They are also released on a regular cadence as opposed to on-demand, namely
-three times a release cycle (roughly a release every 2 months). One of the
-releases corresponds to the coordinated OpenStack released and receives a
-``stable/NAME`` branch. The other two happen during the cycle and receive a
-``bugfix/X.Y`` branch, where ``X.Y`` consists of the major and the minor
-component of the version (e.g. ``bugfix/8.1`` for 8.1.0).
+These projects receive releases every six months as part of the coordinated
+OpenStack release that happens semi-annually. These releases can be
+found in a ``stable/NAME`` branch.
+
+They are also evaluated for additional bugfix releases between scheduled
+stable releases at the two and four month milestone between stable releases
+(roughly every 2 months). These releases can be found in a ``bugfix/X.Y``
+branch. A bugfix release is only created if there are significant
+beneficial changes and a known downstream operator or distributor will consume
+the release.
To leave some version space for releases from these branches, releases of these
projects from the master branch always increase either the major or the minor
version.
-Currently releases from bugfix branches cannot be automated and must be done by
-the release team manually.
+Currently releases and retirements from bugfix branches cannot be automated and
+must be done by the release team manually.
-After the creation of a bugfix branch it is utmost important to update the
-upper-constraints link for the tests in the tox.ini file, plus override the
-branch for the requirements project to be sure to use the correct
+After the creation of a bugfix branch it is of the utmost importance to update
+the upper-constraints link for the tests in the tox.ini file, plus override
+the branch for the requirements project to be sure to use the correct
upper-constraints; for example see the following change:
-https://review.opendev.org/c/openstack/ironic-python-agent/+/841290
+https://review.opendev.org/c/openstack/ironic-python-agent/+/841290
Things to do before releasing
=============================
@@ -155,7 +159,7 @@ Things to do before releasing
Combine release notes if necessary (for example, a release note for a
feature and another release note to add to that feature may be combined).
-* For ironic releases only, not ironic-inspector releases: if any new API
+* For Ironic releases only, not ironic-inspector releases: if any new API
microversions have been added since the last release, update the REST API
version history (``doc/source/contributor/webapi-version-history.rst``) to
indicate that they were part of the new release.
@@ -196,7 +200,7 @@ following the next steps:
deliverable (i.e. subproject) grouped by release cycles.
* The ``_independent`` directory contains yaml files for deliverables that
- are not bound to (official) cycles (e.g. ironic-python-agent-builder).
+ are not bound to (official) cycles (e.g. ironic-python-agent-builder).
* To check the changes we're about to release we can use the tox environment
``list-unreleased-changes``, with this syntax:
@@ -209,7 +213,7 @@ following the next steps:
not stable/ussuri or stable/train).
For example, assuming we're in the main directory of the releases repository,
- to check the changes in the ussuri series for ironic-python-agent
+ to check the changes in the ussuri series for ironic-python-agent
type:
.. code-block:: bash
@@ -239,12 +243,12 @@ following the next steps:
The ``--intermediate-branch`` option is used to create an intermediate
bugfix branch following the
- `new release model for ironic projects <https://specs.openstack.org/openstack/ironic-specs/specs/not-implemented/new-release-model.html>`_.
+ `new release model for Ironic projects <https://specs.openstack.org/openstack/ironic-specs/specs/not-implemented/new-release-model.html>`_.
To propose the release, use the script to update the deliverable file, then
commit the change, and propose it for review.
- For example, to propose a minor release for ironic in the master branch
+ For example, to propose a minor release for Ironic in the master branch
(current development branch), considering that the code name of the future
stable release is wallaby, use:
@@ -256,7 +260,7 @@ following the next steps:
deliverable, the new version and the branch, if applicable.
A good commit message title should also include the same, for example
- "Release ironic 1.2.3 for ussuri"
+ "Release Ironic 1.2.3 for ussuri"
* As an optional step, we can use ``tox -e list-changes`` to double-check the
changes before submitting them for review.
@@ -306,7 +310,7 @@ This includes:
We need to submit patches for changes in the stable branch to:
-* update the ironic devstack plugin to point at the branched tarball for IPA.
+* update the Ironic devstack plugin to point at the branched tarball for IPA.
An example of this patch is
`here <https://review.opendev.org/685069/>`_.
* set appropriate defaults for ``TEMPEST_BAREMETAL_MIN_MICROVERSION`` and
@@ -320,7 +324,7 @@ We need to submit patches for changes on master to:
need to make these changes. Note that we need to wait until *after* the
switch in grenade is made to test the latest release (N) with master
(e.g. `for stable/queens <https://review.opendev.org/#/c/543615>`_).
- Doing these changes sooner -- after the ironic release and before the switch
+ Doing these changes sooner -- after the Ironic release and before the switch
when grenade is testing the prior release (N-1) with master, will cause
the tests to fail. (You may want to ask/remind infra/qa team, as to
when they will do this switch.)
@@ -331,7 +335,7 @@ We need to submit patches for changes on master to:
only support upgrades from the most recent named release to master.
* remove any DB migration scripts from ``ironic.cmd.dbsync.ONLINE_MIGRATIONS``
- and remove the corresponding code from ironic. (These migration scripts
+ and remove the corresponding code from Ironic. (These migration scripts
are used to migrate from an old release to this latest release; they
shouldn't be needed after that.)
diff --git a/doc/source/contributor/webapi-version-history.rst b/doc/source/contributor/webapi-version-history.rst
index 58c0598eb..11e529292 100644
--- a/doc/source/contributor/webapi-version-history.rst
+++ b/doc/source/contributor/webapi-version-history.rst
@@ -2,6 +2,28 @@
REST API Version History
========================
+1.82 (Antelope)
+----------------------
+
+This version signifies the addition of node sharding endpoints.
+
+- Adds support for get, set, and delete of shard key on Node object.
+- Adds support for ``GET /v1/shards`` which returns a list of all shards and
+ the count of nodes assigned to each.
+
+1.81 (Antelope)
+----------------------
+
+Add endpoint to retrieve introspection data for nodes via the REST API.
+
+* ``GET /v1/nodes/{node_ident}/inventory/``
+
+1.80 (Zed, 21.1)
+----------------------
+
+This version is a signifier of additional RBAC functionality allowing
+a project scoped ``admin`` to create or delete nodes in Ironic.
+
1.79 (Zed, 21.0)
----------------------
A node with the same name as the allocation ``name`` is moved to the
@@ -9,6 +31,7 @@ start of the derived candidate list.
1.78 (Xena, 18.2)
----------------------
+
Add endpoints to allow history events for nodes to be retrieved via
the REST API.
diff --git a/doc/source/install/include/common-prerequisites.inc b/doc/source/install/include/common-prerequisites.inc
index edaca46d0..718e80c9d 100644
--- a/doc/source/install/include/common-prerequisites.inc
+++ b/doc/source/install/include/common-prerequisites.inc
@@ -22,8 +22,16 @@ MySQL database that is used by other OpenStack services.
.. code-block:: console
# mysql -u root -p
- mysql> CREATE DATABASE ironic CHARACTER SET utf8;
+ mysql> CREATE DATABASE ironic CHARACTER SET utf8mb3;
mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'localhost' \
IDENTIFIED BY 'IRONIC_DBPASSWORD';
mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'%' \
IDENTIFIED BY 'IRONIC_DBPASSWORD';
+
+.. note::
+ When creating the database to house Ironic, specifically on MySQL/MariaDB,
+ the character set *cannot* be 4 byte Unicode characters. This is due to
+ an internal structural constraint. UTF8, in these database platforms,
+ has traditionally meant ``utf8mb3``, short for "UTF-8, 3 byte encoding",
+ however the platforms are expected to move to ``utf8mb4`` which is
+ incompatible with Ironic.
diff --git a/driver-requirements.txt b/driver-requirements.txt
index 5333dbd4f..876e817cb 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -4,7 +4,7 @@
# python projects they should package as optional dependencies for Ironic.
# These are available on pypi
-proliantutils>=2.13.0
+proliantutils>=2.14.0
pysnmp>=4.3.0,<5.0.0
python-scciclient>=0.12.2
python-dracclient>=5.1.0,<9.0.0
@@ -17,4 +17,4 @@ ansible>=2.7
python-ibmcclient>=0.2.2,<0.3.0
# Dell EMC iDRAC sushy OEM extension
-sushy-oem-idrac>=4.0.0,<5.0.0
+sushy-oem-idrac>=5.0.0,<6.0.0
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index a944dec69..9bd9af985 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -36,6 +36,7 @@ from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import port
from ironic.api.controllers.v1 import portgroup
from ironic.api.controllers.v1 import ramdisk
+from ironic.api.controllers.v1 import shard
from ironic.api.controllers.v1 import utils
from ironic.api.controllers.v1 import versions
from ironic.api.controllers.v1 import volume
@@ -182,6 +183,16 @@ def v1():
'deploy_templates', '',
bookmark=True)
]
+ if utils.allow_shards_endpoint():
+ v1['shards'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'shards', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'shards', '',
+ bookmark=True)
+ ]
return v1
@@ -200,7 +211,8 @@ class Controller(object):
'conductors': conductor.ConductorsController(),
'allocations': allocation.AllocationsController(),
'events': event.EventsController(),
- 'deploy_templates': deploy_template.DeployTemplatesController()
+ 'deploy_templates': deploy_template.DeployTemplatesController(),
+ 'shards': shard.ShardController(),
}
@method.expose()
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index dab134258..d21b075c8 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -48,6 +48,7 @@ from ironic.common import states as ir_states
from ironic.conductor import steps as conductor_steps
import ironic.conf
from ironic.drivers import base as driver_base
+from ironic.drivers.modules import inspect_utils
from ironic import objects
@@ -179,6 +180,7 @@ def node_schema():
'retired': {'type': ['string', 'boolean', 'null']},
'retired_reason': {'type': ['string', 'null']},
'secure_boot': {'type': ['string', 'boolean', 'null']},
+ 'shard': {'type': ['string', 'null']},
'storage_interface': {'type': ['string', 'null']},
'uuid': {'type': ['string', 'null']},
'vendor_interface': {'type': ['string', 'null']},
@@ -266,6 +268,7 @@ PATCH_ALLOWED_FIELDS = [
'resource_class',
'retired',
'retired_reason',
+ 'shard',
'storage_interface',
'vendor_interface'
]
@@ -1382,6 +1385,7 @@ def _get_fields_for_node_query(fields=None):
'retired',
'retired_reason',
'secure_boot',
+ 'shard',
'storage_interface',
'target_power_state',
'target_provision_state',
@@ -1944,6 +1948,26 @@ class NodeHistoryController(rest.RestController):
node.uuid, event, detail=True)
+class NodeInventoryController(rest.RestController):
+
+ def __init__(self, node_ident):
+ super(NodeInventoryController).__init__()
+ self.node_ident = node_ident
+
+ @METRICS.timer('NodeInventoryController.get')
+ @method.expose()
+ @args.validate(node_ident=args.uuid_or_name)
+ def get(self):
+ """Node inventory of the node.
+
+ :param node_ident: the UUID of a node.
+ """
+ node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:inventory:get', self.node_ident)
+ return inspect_utils.get_introspection_data(node,
+ api.request.context)
+
+
class NodesController(rest.RestController):
"""REST controller for Nodes."""
@@ -1990,6 +2014,7 @@ class NodesController(rest.RestController):
'bios': bios.NodeBiosController,
'allocation': allocation.NodeAllocationController,
'history': NodeHistoryController,
+ 'inventory': NodeInventoryController,
}
@pecan.expose()
@@ -2013,7 +2038,9 @@ class NodesController(rest.RestController):
or (remainder[0] == 'allocation'
and not api_utils.allow_allocations())
or (remainder[0] == 'history'
- and not api_utils.allow_node_history())):
+ and not api_utils.allow_node_history())
+ or (remainder[0] == 'inventory'
+ and not api_utils.allow_node_inventory())):
pecan.abort(http_client.NOT_FOUND)
if remainder[0] == 'traits' and not api_utils.allow_traits():
# NOTE(mgoddard): Returning here will ensure we exhibit the
@@ -2045,7 +2072,8 @@ class NodesController(rest.RestController):
fields=None, fault=None, conductor_group=None,
detail=None, conductor=None, owner=None,
lessee=None, project=None,
- description_contains=None):
+ description_contains=None, shard=None,
+ sharded=None):
if self.from_chassis and not chassis_uuid:
raise exception.MissingParameterValue(
_("Chassis id not specified."))
@@ -2065,6 +2093,12 @@ class NodesController(rest.RestController):
# The query parameters for the 'next' URL
parameters = {}
+
+ # note(JayF): This is where you resolve differences between the name
+ # of the filter in the API and the name of the filter in the DB API.
+ # In the case of lists (args.string_list), you need to append _in to
+ # the filter name in order to exercise the list-aware logic in the
+ # lower level.
possible_filters = {
'maintenance': maintenance,
'chassis_uuid': chassis_uuid,
@@ -2076,10 +2110,12 @@ class NodesController(rest.RestController):
'conductor_group': conductor_group,
'owner': owner,
'lessee': lessee,
+ 'shard_in': shard,
'project': project,
'description_contains': description_contains,
'retired': retired,
- 'instance_uuid': instance_uuid
+ 'instance_uuid': instance_uuid,
+ 'sharded': sharded
}
filters = {}
for key, value in possible_filters.items():
@@ -2098,7 +2134,7 @@ class NodesController(rest.RestController):
# map the name for the call, as we did not pickup a specific
# list of fields to return.
obj_fields = fields
- # NOTE(TheJulia): When a data set of the nodeds list is being
+ # NOTE(TheJulia): When a data set of the nodes list is being
# requested, this method takes approximately 3-3.5% of the time
# when requesting specific fields aligning with Nova's sync
# process. (Local DB though)
@@ -2223,14 +2259,15 @@ class NodesController(rest.RestController):
fault=args.string, conductor_group=args.string,
detail=args.boolean, conductor=args.string,
owner=args.string, description_contains=args.string,
- lessee=args.string, project=args.string)
+ lessee=args.string, project=args.string,
+ shard=args.string_list, sharded=args.boolean)
def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
maintenance=None, retired=None, provision_state=None,
marker=None, limit=None, sort_key='id', sort_dir='asc',
driver=None, fields=None, resource_class=None, fault=None,
conductor_group=None, detail=None, conductor=None,
owner=None, description_contains=None, lessee=None,
- project=None):
+ project=None, shard=None, sharded=None):
"""Retrieve a list of nodes.
:param chassis_uuid: Optional UUID of a chassis, to get only nodes for
@@ -2265,15 +2302,20 @@ class NodesController(rest.RestController):
:param owner: Optional string value that set the owner whose nodes
are to be retrurned.
:param lessee: Optional string value that set the lessee whose nodes
- are to be returned.
+ are to be returned.
:param project: Optional string value that set the project - lessee or
owner - whose nodes are to be returned.
+ :param shard: Optional string value that set the shards whose nodes are
+ to be returned.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
:param fault: Optional string value to get only nodes with that fault.
:param description_contains: Optional string value to get only nodes
with description field contains matching
value.
+ :param sharded: Optional boolean whether to return a list of
+ nodes with or without a shard set. May be combined
+ with other parameters.
"""
project = api_utils.check_list_policy('node', project)
@@ -2288,6 +2330,9 @@ class NodesController(rest.RestController):
api_utils.check_allow_filter_by_conductor(conductor)
api_utils.check_allow_filter_by_owner(owner)
api_utils.check_allow_filter_by_lessee(lessee)
+ api_utils.check_allow_filter_by_shard(shard)
+ # Sharded is guarded by the same API version as shard
+ api_utils.check_allow_filter_by_shard(sharded)
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
@@ -2304,8 +2349,8 @@ class NodesController(rest.RestController):
detail=detail,
conductor=conductor,
owner=owner, lessee=lessee,
- project=project,
- **extra_args)
+ shard=shard, sharded=sharded,
+ project=project, **extra_args)
@METRICS.timer('NodesController.detail')
@method.expose()
@@ -2317,13 +2362,15 @@ class NodesController(rest.RestController):
resource_class=args.string, fault=args.string,
conductor_group=args.string, conductor=args.string,
owner=args.string, description_contains=args.string,
- lessee=args.string, project=args.string)
+ lessee=args.string, project=args.string,
+ shard=args.string_list, sharded=args.boolean)
def detail(self, chassis_uuid=None, instance_uuid=None, associated=None,
maintenance=None, retired=None, provision_state=None,
marker=None, limit=None, sort_key='id', sort_dir='asc',
driver=None, resource_class=None, fault=None,
conductor_group=None, conductor=None, owner=None,
- description_contains=None, lessee=None, project=None):
+ description_contains=None, lessee=None, project=None,
+ shard=None, sharded=None):
"""Retrieve a list of nodes with detail.
:param chassis_uuid: Optional UUID of a chassis, to get only nodes for
@@ -2360,9 +2407,13 @@ class NodesController(rest.RestController):
are to be returned.
:param project: Optional string value that set the project - lessee or
owner - whose nodes are to be returned.
+ :param shard: Optional - set the shards whose nodes are to be returned.
:param description_contains: Optional string value to get only nodes
with description field contains matching
value.
+ :param sharded: Optional boolean whether to return a list of
+ nodes with or without a shard set. May be combined
+ with other parameters.
"""
project = api_utils.check_list_policy('node', project)
@@ -2380,6 +2431,9 @@ class NodesController(rest.RestController):
raise exception.HTTPNotFound()
api_utils.check_allow_filter_by_conductor(conductor)
+ api_utils.check_allow_filter_by_shard(shard)
+ # Sharded is guarded by the same API version as shard
+ api_utils.check_allow_filter_by_shard(sharded)
extra_args = {'description_contains': description_contains}
return self._get_nodes_collection(chassis_uuid, instance_uuid,
@@ -2393,8 +2447,8 @@ class NodesController(rest.RestController):
conductor_group=conductor_group,
conductor=conductor,
owner=owner, lessee=lessee,
- project=project,
- **extra_args)
+ project=project, shard=shard,
+ sharded=sharded, **extra_args)
@METRICS.timer('NodesController.validate')
@method.expose()
@@ -2462,7 +2516,15 @@ class NodesController(rest.RestController):
raise exception.OperationNotPermitted()
context = api.request.context
- api_utils.check_policy('baremetal:node:create')
+ owned_node = False
+ if CONF.api.project_admin_can_manage_own_nodes:
+ owned_node = api_utils.check_policy_true(
+ 'baremetal:node:create:self_owned_node')
+ else:
+ owned_node = False
+
+ if not owned_node:
+ api_utils.check_policy('baremetal:node:create')
reject_fields_in_newer_versions(node)
@@ -2486,6 +2548,28 @@ class NodesController(rest.RestController):
if not node.get('resource_class'):
node['resource_class'] = CONF.default_resource_class
+ cdict = context.to_policy_values()
+ if cdict.get('system_scope') != 'all' and owned_node:
+ # This only applies when the request is not system
+ # scoped.
+
+ # First identify what was requested, and if there is
+ # a project ID to use.
+ project_id = None
+ requested_owner = node.get('owner', None)
+ if cdict.get('project_id', False):
+ project_id = cdict.get('project_id')
+
+ if requested_owner and requested_owner != project_id:
+ # Translation: If project scoped, and an owner has been
+ # requested, and that owner does not match the requestor's
+ # project ID value.
+ msg = _("Cannot create a node as a project scoped admin "
+ "with an owner other than your own project.")
+ raise exception.Invalid(msg)
+ # Finally, note the project ID
+ node['owner'] = project_id
+
chassis = _replace_chassis_uuid_with_id(node)
chassis_uuid = chassis and chassis.uuid or None
@@ -2589,6 +2673,8 @@ class NodesController(rest.RestController):
policy_checks.append('baremetal:node:update:name')
elif p['path'].startswith('/retired'):
policy_checks.append('baremetal:node:update:retired')
+ elif p['path'].startswith('/shard'):
+ policy_checks.append('baremetal:node:update:shard')
else:
generic_update = True
# always do at least one check
@@ -2739,8 +2825,16 @@ class NodesController(rest.RestController):
raise exception.OperationNotPermitted()
context = api.request.context
- rpc_node = api_utils.check_node_policy_and_retrieve(
- 'baremetal:node:delete', node_ident, with_suffix=True)
+ try:
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:delete', node_ident, with_suffix=True)
+ except exception.HTTPForbidden:
+ if not CONF.api.project_admin_can_manage_own_nodes:
+ raise
+ else:
+ rpc_node = api_utils.check_node_policy_and_retrieve(
+ 'baremetal:node:delete:self_owned_node', node_ident,
+ with_suffix=True)
chassis_uuid = _get_chassis_uuid(rpc_node)
notify.emit_start_notification(context, rpc_node, 'delete',
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index adc21ebc2..74fc40a8f 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -123,9 +123,9 @@ def convert_with_links(rpc_port, fields=None, sanitize=True):
'local_link_connection',
'physical_network',
'pxe_enabled',
+ 'node_uuid',
)
)
- api_utils.populate_node_uuid(rpc_port, port)
if rpc_port.portgroup_id:
pg = objects.Portgroup.get(api.request.context, rpc_port.portgroup_id)
port['portgroup_uuid'] = pg.uuid
@@ -163,24 +163,13 @@ def port_sanitize(port, fields=None):
def list_convert_with_links(rpc_ports, limit, url, fields=None, **kwargs):
ports = []
for rpc_port in rpc_ports:
- try:
- port = convert_with_links(rpc_port, fields=fields,
- sanitize=False)
- except exception.NodeNotFound:
- # NOTE(dtantsur): node was deleted after we fetched the port
- # list, meaning that the port was also deleted. Skip it.
- LOG.debug('Skipping port %s as its node was deleted',
- rpc_port.uuid)
+ port = convert_with_links(rpc_port, fields=fields,
+ sanitize=False)
+ # NOTE(dtantsur): node was deleted after we fetched the port
+ # list, meaning that the port was also deleted. Skip it.
+ if port['node_uuid'] is None:
continue
- except exception.PortgroupNotFound:
- # NOTE(dtantsur): port group was deleted after we fetched the
- # port list, it may mean that the port was deleted too, but
- # we don't know it. Pretend that the port group was removed.
- LOG.debug('Removing port group UUID from port %s as the port '
- 'group was deleted', rpc_port.uuid)
- rpc_port.portgroup_id = None
- port = convert_with_links(rpc_port, fields=fields,
- sanitize=False)
+
ports.append(port)
return collection.list_convert_with_links(
items=ports,
@@ -210,7 +199,7 @@ class PortsController(rest.RestController):
self.parent_portgroup_ident = portgroup_ident
def _get_ports_collection(self, node_ident, address, portgroup_ident,
- marker, limit, sort_key, sort_dir,
+ shard, marker, limit, sort_key, sort_dir,
resource_url=None, fields=None, detail=None,
project=None):
"""Retrieve a collection of ports.
@@ -221,6 +210,8 @@ class PortsController(rest.RestController):
this MAC address.
:param portgroup_ident: UUID or name of a portgroup, to get only ports
for that portgroup.
+    :param shard: A comma-separated shard list, to get only ports for those
+        shards.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
@@ -253,8 +244,12 @@ class PortsController(rest.RestController):
node_ident = self.parent_node_ident or node_ident
portgroup_ident = self.parent_portgroup_ident or portgroup_ident
- if node_ident and portgroup_ident:
- raise exception.OperationNotPermitted()
+ exclusive_filters = 0
+ for i in [node_ident, portgroup_ident, shard]:
+ if i:
+ exclusive_filters += 1
+ if exclusive_filters > 1:
+ raise exception.OperationNotPermitted()
if portgroup_ident:
# FIXME: Since all we need is the portgroup ID, we can
@@ -281,6 +276,11 @@ class PortsController(rest.RestController):
project=project)
elif address:
ports = self._get_ports_by_address(address, project=project)
+ elif shard:
+ ports = objects.Port.list_by_node_shards(api.request.context,
+ shard, limit,
+ marker_obj, sort_key,
+ sort_dir, project=project)
else:
ports = objects.Port.list(api.request.context, limit,
marker_obj, sort_key=sort_key,
@@ -351,10 +351,11 @@ class PortsController(rest.RestController):
address=args.mac_address, marker=args.uuid,
limit=args.integer, sort_key=args.string,
sort_dir=args.string, fields=args.string_list,
- portgroup=args.uuid_or_name, detail=args.boolean)
+ portgroup=args.uuid_or_name, detail=args.boolean,
+ shard=args.string_list)
def get_all(self, node=None, node_uuid=None, address=None, marker=None,
limit=None, sort_key='id', sort_dir='asc', fields=None,
- portgroup=None, detail=None):
+ portgroup=None, detail=None, shard=None):
"""Retrieve a list of ports.
Note that the 'node_uuid' interface is deprecated in favour
@@ -377,6 +378,8 @@ class PortsController(rest.RestController):
of the resource to be returned.
:param portgroup: UUID or name of a portgroup, to get only ports
for that portgroup.
+ :param shard: Optional, a list of shard ids to filter by, only ports
+ associated with nodes in these shards will be returned.
:raises: NotAcceptable, HTTPNotFound
"""
project = api_utils.check_port_list_policy(
@@ -396,6 +399,8 @@ class PortsController(rest.RestController):
if portgroup and not api_utils.allow_portgroups_subcontrollers():
raise exception.NotAcceptable()
+ api_utils.check_allow_filter_by_shard(shard)
+
fields = api_utils.get_request_return_fields(fields, detail,
_DEFAULT_RETURN_FIELDS)
@@ -408,8 +413,9 @@ class PortsController(rest.RestController):
raise exception.NotAcceptable()
return self._get_ports_collection(node_uuid or node, address,
- portgroup, marker, limit, sort_key,
- sort_dir, resource_url='ports',
+ portgroup, shard, marker, limit,
+ sort_key, sort_dir,
+ resource_url='ports',
fields=fields, detail=detail,
project=project)
@@ -418,10 +424,11 @@ class PortsController(rest.RestController):
@args.validate(node=args.uuid_or_name, node_uuid=args.uuid,
address=args.mac_address, marker=args.uuid,
limit=args.integer, sort_key=args.string,
- sort_dir=args.string,
- portgroup=args.uuid_or_name)
+ sort_dir=args.string, portgroup=args.uuid_or_name,
+ shard=args.string_list)
def detail(self, node=None, node_uuid=None, address=None, marker=None,
- limit=None, sort_key='id', sort_dir='asc', portgroup=None):
+ limit=None, sort_key='id', sort_dir='asc', portgroup=None,
+ shard=None):
"""Retrieve a list of ports with detail.
Note that the 'node_uuid' interface is deprecated in favour
@@ -435,6 +442,8 @@ class PortsController(rest.RestController):
this MAC address.
:param portgroup: UUID or name of a portgroup, to get only ports
for that portgroup.
+ :param shard: comma separated list of shards, to only get ports
+ associated with nodes in those shards.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
This value cannot be larger than the value of max_limit
@@ -452,6 +461,8 @@ class PortsController(rest.RestController):
if portgroup and not api_utils.allow_portgroups_subcontrollers():
raise exception.NotAcceptable()
+ api_utils.check_allow_filter_by_shard(shard)
+
if not node_uuid and node:
# We're invoking this interface using positional notation, or
# explicitly using 'node'. Try and determine which one.
@@ -466,8 +477,8 @@ class PortsController(rest.RestController):
raise exception.HTTPNotFound()
return self._get_ports_collection(node_uuid or node, address,
- portgroup, marker, limit, sort_key,
- sort_dir,
+ portgroup, shard, marker, limit,
+ sort_key, sort_dir,
resource_url='ports/detail',
project=project)
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index 6c68e07ba..91740d3c7 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -90,10 +90,10 @@ def convert_with_links(rpc_portgroup, fields=None, sanitize=True):
'mode',
'name',
'properties',
- 'standalone_ports_supported'
+ 'standalone_ports_supported',
+ 'node_uuid'
)
)
- api_utils.populate_node_uuid(rpc_portgroup, portgroup)
url = api.request.public_url
portgroup['ports'] = [
link.make_link('self', url, 'portgroups',
diff --git a/ironic/api/controllers/v1/shard.py b/ironic/api/controllers/v1/shard.py
new file mode 100644
index 000000000..7aa086997
--- /dev/null
+++ b/ironic/api/controllers/v1/shard.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic_lib import metrics_utils
+from oslo_config import cfg
+import pecan
+from webob import exc as webob_exc
+
+from ironic import api
+from ironic.api.controllers.v1 import utils as api_utils
+from ironic.api import method
+from ironic.common.i18n import _
+
+
+CONF = cfg.CONF
+
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+
+class ShardController(pecan.rest.RestController):
+ """REST controller for shards."""
+
+    @pecan.expose()
+    def _route(self, argv, request=None):
+        # Reject requests before the shards endpoint existed (API < 1.82):
+        # GET -> 404 (resource unknown), anything else -> 405.
+        if not api_utils.allow_shards_endpoint():
+            msg = _("The API version does not allow shards")
+            # NOTE: use equality, not substring membership ("in"), which
+            # would also match partial strings such as "G" or "ET".
+            if api.request.method == "GET":
+                raise webob_exc.HTTPNotFound(msg)
+            else:
+                raise webob_exc.HTTPMethodNotAllowed(msg)
+        return super(ShardController, self)._route(argv, request)
+
+ @METRICS.timer('ShardController.get_all')
+ @method.expose()
+ def get_all(self):
+ """Retrieve a list of shards.
+
+ :returns: A list of shards.
+ """
+ api_utils.check_policy('baremetal:shards:get')
+
+ return {
+ 'shards': api.request.dbapi.get_shard_list(),
+ }
+
+ @METRICS.timer('ShardController.get_one')
+ @method.expose()
+ def get_one(self, __):
+ """Explicitly do not support getting one."""
+ pecan.abort(404)
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 04525ff65..12cba1a4a 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -86,11 +86,13 @@ STANDARD_TRAITS = os_traits.get_traits()
CUSTOM_TRAIT_PATTERN = "^%s[A-Z0-9_]+$" % os_traits.CUSTOM_NAMESPACE
CUSTOM_TRAIT_REGEX = re.compile(CUSTOM_TRAIT_PATTERN)
-TRAITS_SCHEMA = {'anyOf': [
- {'type': 'string', 'minLength': 1, 'maxLength': 255,
- 'pattern': CUSTOM_TRAIT_PATTERN},
- {'type': 'string', 'enum': STANDARD_TRAITS},
-]}
+TRAITS_SCHEMA = {
+ 'type': 'string', 'minLength': 1, 'maxLength': 255,
+ 'anyOf': [
+ {'pattern': CUSTOM_TRAIT_PATTERN},
+ {'enum': STANDARD_TRAITS},
+ ]
+}
LOCAL_LINK_BASE_SCHEMA = {
'type': 'object',
@@ -805,6 +807,7 @@ VERSIONED_FIELDS = {
'network_data': versions.MINOR_66_NODE_NETWORK_DATA,
'boot_mode': versions.MINOR_75_NODE_BOOT_MODE,
'secure_boot': versions.MINOR_75_NODE_BOOT_MODE,
+ 'shard': versions.MINOR_82_NODE_SHARD
}
for field in V31_FIELDS:
@@ -1063,6 +1066,20 @@ def check_allow_filter_by_lessee(lessee):
'opr': versions.MINOR_65_NODE_LESSEE})
+def check_allow_filter_by_shard(shard):
+ """Check if filtering nodes by shard is allowed.
+
+ Version 1.82 of the API allows filtering nodes by shard.
+ """
+ if (shard is not None and api.request.version.minor
+ < versions.MINOR_82_NODE_SHARD):
+ raise exception.NotAcceptable(_(
+ "Request not acceptable. The minimal required API version "
+ "should be %(base)s.%(opr)s") %
+ {'base': versions.BASE_VERSION,
+ 'opr': versions.MINOR_82_NODE_SHARD})
+
+
def initial_node_provision_state():
"""Return node state to use by default when creating new nodes.
@@ -1339,6 +1356,11 @@ def allow_node_history():
return api.request.version.minor >= versions.MINOR_78_NODE_HISTORY
+def allow_node_inventory():
+ """Check if node inventory is allowed."""
+ return api.request.version.minor >= versions.MINOR_81_NODE_INVENTORY
+
+
def get_request_return_fields(fields, detail, default_fields,
check_detail_version=allow_detail_query,
check_fields_version=None):
@@ -1946,3 +1968,8 @@ def check_allow_clean_disable_ramdisk(target, disable_ramdisk):
elif target != "clean":
raise exception.BadRequest(
_("disable_ramdisk is supported only with manual cleaning"))
+
+
+def allow_shards_endpoint():
+ """Check if shards endpoint is available."""
+ return api.request.version.minor >= versions.MINOR_82_NODE_SHARD
diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py
index 7fc80bc97..f4cd26c0f 100644
--- a/ironic/api/controllers/v1/versions.py
+++ b/ironic/api/controllers/v1/versions.py
@@ -117,7 +117,9 @@ BASE_VERSION = 1
# v1.77: Add fields selector to drivers list and driver detail.
# v1.78: Add node history endpoint
# v1.79: Change allocation behaviour to prefer node name match
-
+# v1.80: Marker to represent self service node creation/deletion
+# v1.81: Add node inventory
+# v1.82: Add node sharding capability
MINOR_0_JUNO = 0
MINOR_1_INITIAL_VERSION = 1
MINOR_2_AVAILABLE_STATE = 2
@@ -198,6 +200,9 @@ MINOR_76_NODE_CHANGE_BOOT_MODE = 76
MINOR_77_DRIVER_FIELDS_SELECTOR = 77
MINOR_78_NODE_HISTORY = 78
MINOR_79_ALLOCATION_NODE_NAME = 79
+MINOR_80_PROJECT_CREATE_DELETE_NODE = 80
+MINOR_81_NODE_INVENTORY = 81
+MINOR_82_NODE_SHARD = 82
# When adding another version, update:
# - MINOR_MAX_VERSION
@@ -205,7 +210,7 @@ MINOR_79_ALLOCATION_NODE_NAME = 79
# explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']
-MINOR_MAX_VERSION = MINOR_79_ALLOCATION_NODE_NAME
+MINOR_MAX_VERSION = MINOR_82_NODE_SHARD
# String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
diff --git a/ironic/cmd/status.py b/ironic/cmd/status.py
index 10c8a5bfd..d395b985b 100644
--- a/ironic/cmd/status.py
+++ b/ironic/cmd/status.py
@@ -19,7 +19,7 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
-from sqlalchemy import exc as sa_exc
+import sqlalchemy
from ironic.cmd import dbsync
from ironic.common.i18n import _
@@ -50,7 +50,7 @@ class Checks(upgradecheck.UpgradeCommands):
# when a table is missing, so lets catch it, since it is fatal.
msg = dbsync.DBCommand().check_obj_versions(
ignore_missing_tables=True)
- except sa_exc.NoSuchTableError as e:
+ except sqlalchemy.exc.NoSuchTableError as e:
msg = ('Database table missing. Please ensure you have '
'updated the database schema. Not Found: %s' % e)
return upgradecheck.Result(upgradecheck.Code.FAILURE, details=msg)
@@ -94,6 +94,41 @@ class Checks(upgradecheck.UpgradeCommands):
else:
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ def _check_allocations_table(self):
+ msg = None
+ engine = enginefacade.reader.get_engine()
+ if 'mysql' not in str(engine.url):
+ # This test only applies to mysql and database schema
+ # selection.
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+ res = engine.execute("show create table allocations")
+ results = str(res.all()).lower()
+ if 'utf8' not in results:
+            msg = ('The Allocations table is not using UTF8 encoding. '
+ 'This is corrected in later versions of Ironic, where '
+ 'the table character set schema is automatically '
+ 'migrated. Continued use of a non-UTF8 character '
+ 'set may produce unexpected results.')
+
+ if 'innodb' not in results:
+ warning = ('The engine used by MySQL for the allocations '
+ 'table is not the intended engine for the Ironic '
+ 'database tables to use. This may have been a result '
+ 'of an error with the table creation schema. This '
+ 'may require Database Administrator intervention '
+ 'and downtime to dump, modify the table engine to '
+ 'utilize InnoDB, and reload the allocations table to '
+ 'utilize the InnoDB engine.')
+ if msg:
+ msg = msg + ' Additionally: ' + warning
+ else:
+ msg = warning
+
+ if msg:
+ return upgradecheck.Result(upgradecheck.Code.WARNING, details=msg)
+ else:
+ return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+
# A tuple of check tuples of (<name of check>, <check function>).
# The name of the check will be used in the output of this command.
# The check function takes no arguments and returns an
@@ -105,6 +140,8 @@ class Checks(upgradecheck.UpgradeCommands):
_upgrade_checks = (
(_('Object versions'), _check_obj_versions),
(_('Database Index Status'), _check_db_indexes),
+ (_('Allocations Name Field Length Check'),
+ _check_allocations_table),
# Victoria -> Wallaby migration
(_('Policy File JSON to YAML Migration'),
(common_checks.check_policy_json, {'conf': CONF})),
diff --git a/ironic/common/args.py b/ironic/common/args.py
index 94cfe8841..bd13e3eaf 100755
--- a/ironic/common/args.py
+++ b/ironic/common/args.py
@@ -211,12 +211,17 @@ def _validate_schema(name, value, schema):
try:
jsonschema.validate(value, schema)
except jsonschema.exceptions.ValidationError as e:
-
- # The error message includes the whole schema which can be very
- # large and unhelpful, so truncate it to be brief and useful
- error_msg = ' '.join(str(e).split("\n")[:3])[:-1]
- raise exception.InvalidParameterValue(
- _('Schema error for %s: %s') % (name, error_msg))
+ error_msg = _('Schema error for %s: %s') % (name, e.message)
+ # Sometimes the root message is too generic, try to find a possible
+ # root cause:
+ cause = None
+ current = e
+ while current.context:
+ current = jsonschema.exceptions.best_match(current.context)
+ cause = current.message
+ if cause is not None:
+ error_msg += _('. Possible root cause: %s') % cause
+ raise exception.InvalidParameterValue(error_msg)
return value
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index ddbce6f47..a4925faf3 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -120,6 +120,10 @@ class VolumeTargetBootIndexAlreadyExists(Conflict):
"for the same node already exists.")
+class NodeInventoryAlreadyExists(Conflict):
+ _msg_fmt = _("A node inventory with ID %(id)s already exists.")
+
+
class VifAlreadyAttached(Conflict):
_msg_fmt = _("Unable to attach VIF because VIF %(vif)s is already "
"attached to Ironic %(object_type)s %(object_uuid)s")
@@ -828,6 +832,10 @@ class NodeHistoryNotFound(NotFound):
_msg_fmt = _("Node history record %(history)s could not be found.")
+class NodeInventoryNotFound(NotFound):
+ _msg_fmt = _("Node inventory record for node %(node)s could not be found.")
+
+
class IncorrectConfiguration(IronicException):
_msg_fmt = _("Supplied configuration is incorrect and must be fixed. "
"Error: %(error)s")
@@ -851,3 +859,18 @@ class ImageRefIsARedirect(IronicException):
message=msg,
image_ref=image_ref,
redirect_url=redirect_url)
+
+
+class ConcurrentActionLimit(IronicException):
+ # NOTE(TheJulia): We explicitly don't report the concurrent
+ # action limit configuration value as a security guard since
+ # if informed of the limit, an attacker can tailor their attack.
+ _msg_fmt = _("Unable to process request at this time. "
+ "The concurrent action limit for %(task_type)s "
+ "has been reached. Please contact your administrator "
+ "and try again later.")
+
+
+class SwiftObjectStillExists(IronicException):
+ _msg_fmt = _("Clean up failed for swift object %(obj)s during deletion"
+ " of node %(node)s.")
diff --git a/ironic/common/glance_service/image_service.py b/ironic/common/glance_service/image_service.py
index 0a32eaf0a..1d9d6d4bc 100644
--- a/ironic/common/glance_service/image_service.py
+++ b/ironic/common/glance_service/image_service.py
@@ -33,6 +33,7 @@ from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.common import swift
+from ironic.common import utils
from ironic.conf import CONF
TempUrlCacheElement = collections.namedtuple('TempUrlCacheElement',
@@ -114,7 +115,7 @@ class GlanceImageService(object):
@tenacity.retry(
retry=tenacity.retry_if_exception_type(
exception.GlanceConnectionFailed),
- stop=tenacity.stop_after_attempt(CONF.glance.num_retries + 1),
+ stop=utils.stop_after_retries('num_retries', group='glance'),
wait=tenacity.wait_fixed(1),
reraise=True
)
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 2b52b789b..aa883ada3 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -657,10 +657,24 @@ def is_source_a_path(ctx, image_source):
# NOTE(TheJulia): I don't really like this pattern, *but*
# the wholedisk image support is similar.
return
+ # NOTE(TheJulia): Files should have been caught almost exclusively
+ # before with the Content-Length check.
+ # When the ISO is mounted and the webserver mount point url is
+ # checked here, it has both 'Content-Length' and 'Content-Type'
+ # due to which it always returns False. Hence switched the conditions.
+ if ('Content-Type' in headers
+ and str(headers['Content-Type']).startswith('text/html')):
+ LOG.debug('Evaluated %(url)s to determine if it is a URL to a path '
+ 'or a file. A Content-Type header was returned with a text '
+ 'content, which suggests a file list was returned.',
+ {'url': image_source})
+ return True
# When issuing a head request, folders have no length
# A list can be generated by the server.. This is a solid
# hint.
- if 'Content-Length' in headers:
+ if ('Content-Type' in headers
+ and (str(headers['Content-Type']) != 'text/html')
+ and 'Content-Length' in headers):
LOG.debug('Evaluated %(url)s to determine if it is a URL to a path '
'or a file. A Content-Length header was returned '
'suggesting file.',
@@ -668,16 +682,6 @@ def is_source_a_path(ctx, image_source):
# NOTE(TheJulia): Files on a webserver have a length which is returned
# when headres are queried.
return False
- if ('Content-Type' in headers
- and str(headers['Content-Type']).startswith('text')
- and 'Content-Length' not in headers):
- LOG.debug('Evaluated %(url)s to determine if it is a URL to a path '
- 'or a file. A Content-Type header was returned with a text '
- 'content, which suggests a file list was returned.',
- {'url': image_source})
- return True
- # NOTE(TheJulia): Files should have been caught almost exclusively
- # before with the Content-Length check.
if image_source.endswith('/'):
# If all else fails, looks like a URL, and the server didn't give
# us any hints.
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index a56257e0f..b877611af 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -57,7 +57,7 @@ SYSTEM_MEMBER = 'role:member and system_scope:all'
# support. These uses are also able to view project-specific resources where
# applicable (e.g., listing all volumes in the deployment, regardless of the
# project they belong to).
-SYSTEM_READER = 'role:reader and system_scope:all'
+SYSTEM_READER = '(role:reader and system_scope:all) or (role:service and system_scope:all)' # noqa
# This check string is reserved for actions that require the highest level of
# authorization on a project or resources within the project (e.g., setting the
@@ -83,6 +83,14 @@ PROJECT_MEMBER = ('role:member and '
PROJECT_READER = ('role:reader and '
'(project_id:%(node.owner)s or project_id:%(node.lessee)s)')
+# This check string is used for granting access to other services which need
+# to communicate with Ironic, for example, Nova-Compute to provision nodes,
+# or Ironic-Inspector to create nodes. The idea behind a service role is
+# one which has restricted access to perform operations, that are limited
+# to remote automated and inter-operation processes.
+SYSTEM_SERVICE = ('role:service and system_scope:all')
+PROJECT_SERVICE = ('role:service and project_id:%(node.owner)s')
+
# The following are common composite check strings that are useful for
# protecting APIs designed to operate with multiple scopes (e.g., a system
# administrator should be able to delete any baremetal host in the deployment,
@@ -91,7 +99,7 @@ SYSTEM_OR_PROJECT_MEMBER = (
'(' + SYSTEM_MEMBER + ') or (' + PROJECT_MEMBER + ')'
)
SYSTEM_OR_PROJECT_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + PROJECT_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
PROJECT_OWNER_ADMIN = ('role:admin and project_id:%(node.owner)s')
@@ -109,28 +117,36 @@ ALLOCATION_OWNER_MANAGER = ('role:manager and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_MEMBER = ('role:member and project_id:%(allocation.owner)s')
ALLOCATION_OWNER_READER = ('role:reader and project_id:%(allocation.owner)s')
+# Used for general operations like changing provision state.
SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used for creation and deletion of network ports.
SYSTEM_ADMIN_OR_OWNER_ADMIN = (
- '(' + SYSTEM_ADMIN + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map system members, and owner admins to the same access rights.
+# This is actions such as update driver interfaces, delete ports.
SYSTEM_MEMBER_OR_OWNER_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used to map "member" only rights, i.e. those of "users using a deployment"
SYSTEM_MEMBER_OR_OWNER_MEMBER = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_MEMBER + ')'
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_MEMBER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Used throughout to map where authenticated readers
+# should be able to read API objects.
SYSTEM_OR_OWNER_READER = (
- '(' + SYSTEM_READER + ') or (' + PROJECT_OWNER_READER + ')'
+ '(' + SYSTEM_READER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_READER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
+# Mainly used for targets/connectors
SYSTEM_MEMBER_OR_OWNER_LESSEE_ADMIN = (
- '(' + SYSTEM_MEMBER + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ')' # noqa
+ '(' + SYSTEM_MEMBER + ') or (' + SYSTEM_SERVICE + ') or (' + PROJECT_OWNER_ADMIN + ') or (' + PROJECT_OWNER_MANAGER + ') or (' + PROJECT_LESSEE_ADMIN + ') or (' + PROJECT_LESSEE_MANAGER + ') or (' + PROJECT_SERVICE + ')' # noqa
)
@@ -152,7 +168,10 @@ ALLOCATION_CREATOR = (
# Special purpose aliases for things like "ability to access the API
# as a reader, or permission checking that does not require node
# owner relationship checking
-API_READER = ('role:reader')
+API_READER = ('(role:reader) or (role:service)')
+
+# Used for ability to view target properties of a volume, which is
+# considered highly restricted.
TARGET_PROPERTIES_READER = (
'(' + SYSTEM_READER + ') or (role:admin)'
)
@@ -436,13 +455,21 @@ deprecated_bios_disable_cleaning = policy.DeprecatedRule(
node_policies = [
policy.DocumentedRuleDefault(
name='baremetal:node:create',
- check_str=SYSTEM_ADMIN,
- scope_types=['system'],
+ check_str='(' + SYSTEM_ADMIN + ') or (' + SYSTEM_SERVICE + ')',
+ scope_types=['system', 'project'],
description='Create Node records',
operations=[{'path': '/nodes', 'method': 'POST'}],
deprecated_rule=deprecated_node_create
),
policy.DocumentedRuleDefault(
+ name='baremetal:node:create:self_owned_node',
+ check_str=('(role:admin) or (role:service)'),
+ scope_types=['project'],
+ description='Create node records which will be tracked '
+ 'as owned by the associated user project.',
+ operations=[{'path': '/nodes', 'method': 'POST'}],
+ ),
+ policy.DocumentedRuleDefault(
name='baremetal:node:list',
check_str=API_READER,
scope_types=['system', 'project'],
@@ -663,7 +690,14 @@ node_policies = [
operations=[{'path': '/nodes/{node_ident}', 'method': 'DELETE'}],
deprecated_rule=deprecated_node_delete
),
-
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:delete:self_owned_node',
+ check_str=PROJECT_ADMIN,
+ scope_types=['project'],
+ description='Delete node records which are associated with '
+ 'the requesting project.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'DELETE'}],
+ ),
policy.DocumentedRuleDefault(
name='baremetal:node:validate',
check_str=SYSTEM_OR_OWNER_MEMBER_AND_LESSEE_ADMIN,
@@ -939,8 +973,34 @@ node_policies = [
# operating context.
deprecated_rule=deprecated_node_get
),
-
-
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:inventory:get',
+ check_str=SYSTEM_OR_OWNER_READER,
+ scope_types=['system', 'project'],
+ description='Retrieve introspection data for a node.',
+ operations=[
+ {'path': '/nodes/{node_ident}/inventory', 'method': 'GET'},
+ ],
+        # This rule falls back to deprecated_node_get in order to provide a
+ # mechanism so the additional policies only engage in an updated
+ # operating context.
+ deprecated_rule=deprecated_node_get
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:node:update:shard',
+ check_str=SYSTEM_ADMIN,
+ scope_types=['system', 'project'],
+ description='Governs if node shard field can be updated via '
+ 'the API clients.',
+ operations=[{'path': '/nodes/{node_ident}', 'method': 'PATCH'}],
+ ),
+ policy.DocumentedRuleDefault(
+ name='baremetal:shards:get',
+ check_str=SYSTEM_READER,
+ scope_types=['system', 'project'],
+ description='Governs if shards can be read via the API clients.',
+ operations=[{'path': '/shards', 'method': 'GET'}],
+ ),
]
deprecated_port_reason = """
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index 1849aaa7d..371a1b5d5 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -59,6 +59,7 @@ DHCPV6_BOOTFILE_NAME = '59' # rfc5970
DHCP_TFTP_SERVER_ADDRESS = '150' # rfc5859
DHCP_IPXE_ENCAP_OPTS = '175' # Tentatively Assigned
DHCP_TFTP_PATH_PREFIX = '210' # rfc5071
+DHCP_SERVER_IP_ADDRESS = '255' # dnsmasq server-ip-address
DEPLOY_KERNEL_RAMDISK_LABELS = ['deploy_kernel', 'deploy_ramdisk']
RESCUE_KERNEL_RAMDISK_LABELS = ['rescue_kernel', 'rescue_ramdisk']
@@ -264,6 +265,9 @@ def get_file_path_from_label(node_uuid, root_dir, label):
:param root_dir: Directory in which the image must be placed
:param label: Name of the image
"""
+ if label in ['ks_template', 'ks_cfg', 'stage2']:
+ path = os.path.join(CONF.deploy.http_root, node_uuid)
+ ensure_tree(path)
if label == 'ks_template':
return os.path.join(CONF.deploy.http_root, node_uuid,
'ks.cfg.template')
@@ -488,7 +492,7 @@ def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False,
else:
use_ip_version = int(CONF.pxe.ip_version)
dhcp_opts = []
- dhcp_provider_name = CONF.dhcp.dhcp_provider
+ api = dhcp_factory.DHCPFactory().provider
if use_ip_version == 4:
boot_file_param = DHCP_BOOTFILE_NAME
else:
@@ -517,7 +521,7 @@ def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False,
ipxe_script_url = '/'.join([CONF.deploy.http_url, script_name])
# if the request comes from dumb firmware send them the iPXE
# boot image.
- if dhcp_provider_name == 'neutron':
+ if api.supports_ipxe_tag():
# Neutron use dnsmasq as default DHCP agent. Neutron carries the
# configuration to relate to the tags below. The ipxe6 tag was
# added in the Stein cycle which identifies the iPXE User-Class
@@ -588,7 +592,7 @@ def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False,
# Related bug was opened on Neutron side:
# https://bugs.launchpad.net/neutron/+bug/1723354
if not url_boot:
- dhcp_opts.append({'opt_name': 'server-ip-address',
+ dhcp_opts.append({'opt_name': DHCP_SERVER_IP_ADDRESS,
'opt_value': CONF.pxe.tftp_server})
# Append the IP version for all the configuration options
@@ -674,20 +678,33 @@ def get_instance_image_info(task, ipxe_enabled=False):
os.path.join(root_dir, node.uuid, 'boot_iso'))
return image_info
-
image_properties = None
d_info = deploy_utils.get_image_instance_info(node)
+ isap = node.driver_internal_info.get('is_source_a_path')
def _get_image_properties():
- nonlocal image_properties
- if not image_properties:
+ nonlocal image_properties, isap
+ if not image_properties and not isap:
i_service = service.get_image_service(
d_info['image_source'],
context=ctx)
image_properties = i_service.show(
d_info['image_source'])['properties']
+ # TODO(TheJulia): At some point, we should teach this code
+ # to understand that with a path, it *can* retrieve the
+ # manifest from the HTTP(S) endpoint, which can populate
+ # image_properties, and drive path to variable population
+ # like is done with basically Glance.
labels = ('kernel', 'ramdisk')
+ if not isap:
+ anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
+ else:
+ # When a path is used, a stage2 ramdisk can be determined
+ # automatically by anaconda, so it is not an explicit
+ # requirement.
+ anaconda_labels = ('ks_template', 'ks_cfg')
+
if not (i_info.get('kernel') and i_info.get('ramdisk')):
# NOTE(rloo): If both are not specified in instance_info
# we won't use any of them. We'll use the values specified
@@ -700,20 +717,13 @@ def get_instance_image_info(task, ipxe_enabled=False):
i_info[label] = str(image_properties[label + '_id'])
node.instance_info = i_info
node.save()
+ # TODO(TheJulia): Add functionality to look/grab the hints file
+ # for anaconda and just run with the entire path.
- anaconda_labels = ()
- if deploy_utils.get_boot_option(node) == 'kickstart':
- isap = node.driver_internal_info.get('is_source_a_path')
# stage2: installer stage2 squashfs image
# ks_template: anaconda kickstart template
# ks_cfg - rendered ks_template
- if not isap:
- anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
- else:
- # When a path is used, a stage2 ramdisk can be determiend
- # automatically by anaconda, so it is not an explicit
- # requirement.
- anaconda_labels = ('ks_template', 'ks_cfg')
+
# NOTE(rloo): We save stage2 & ks_template values in case they
# are changed by the user after we start using them and to
# prevent re-computing them again.
@@ -733,26 +743,31 @@ def get_instance_image_info(task, ipxe_enabled=False):
else:
node.set_driver_internal_info(
'stage2', str(image_properties['stage2_id']))
- # NOTE(TheJulia): A kickstart template is entirely independent
- # of the stage2 ramdisk. In the end, it was the configuration which
- # told anaconda how to execute.
- if i_info.get('ks_template'):
- # If the value is set, we always overwrite it, in the event
- # a rebuild is occuring or something along those lines.
- node.set_driver_internal_info('ks_template',
- i_info['ks_template'])
+ # NOTE(TheJulia): A kickstart template is entirely independent
+ # of the stage2 ramdisk. In the end, it was the configuration which
+ # told anaconda how to execute.
+ if i_info.get('ks_template'):
+ # If the value is set, we always overwrite it, in the event
+ # a rebuild is occurring or something along those lines.
+ node.set_driver_internal_info('ks_template',
+ i_info['ks_template'])
+ else:
+ _get_image_properties()
+ # ks_template is an optional property on the image
+ if image_properties and 'ks_template' in image_properties:
+ node.set_driver_internal_info(
+ 'ks_template', str(image_properties['ks_template']))
else:
- _get_image_properties()
- # ks_template is an optional property on the image
- if 'ks_template' not in image_properties:
- # If not defined, default to the overall system default
- # kickstart template, as opposed to a user supplied
- # template.
- node.set_driver_internal_info(
- 'ks_template', CONF.anaconda.default_ks_template)
- else:
- node.set_driver_internal_info(
- 'ks_template', str(image_properties['ks_template']))
+ # If not defined, default to the overall system default
+ # kickstart template, as opposed to a user supplied
+ # template.
+ node.set_driver_internal_info(
+ 'ks_template',
+ 'file://' + os.path.abspath(
+ CONF.anaconda.default_ks_template
+ )
+ )
+
node.save()
for label in labels + anaconda_labels:
@@ -1004,6 +1019,8 @@ def build_kickstart_config_options(task):
if node.driver_internal_info.get('is_source_a_path', False):
# Record a value so it matches as the template opts in.
params['is_source_a_path'] = 'true'
+ if CONF.anaconda.insecure_heartbeat:
+ params['insecure_heartbeat'] = 'true'
params['agent_token'] = node.driver_internal_info['agent_secret_token']
heartbeat_url = '%s/v1/heartbeat/%s' % (
deploy_utils.get_ironic_api_url().rstrip('/'),
@@ -1251,6 +1268,8 @@ def cache_ramdisk_kernel(task, pxe_info, ipxe_enabled=False):
CONF.deploy.http_root,
'stage2')
ensure_tree(os.path.dirname(file_path))
+
+ if 'ks_cfg' in pxe_info:
# ks_cfg is rendered later by the driver using ks_template. It cannot
# be fetched and cached.
t_pxe_info.pop('ks_cfg')
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index 9dfe864ee..7162ca4d3 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -490,8 +490,8 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
- 'master': {
- 'api': '1.79',
+ '21.1': {
+ 'api': '1.80',
'rpc': '1.55',
'objects': {
'Allocation': ['1.1'],
@@ -510,6 +510,27 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
+ 'master': {
+ 'api': '1.82',
+ 'rpc': '1.55',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'BIOSSetting': ['1.1'],
+ 'Node': ['1.37'],
+ 'NodeHistory': ['1.0'],
+ 'NodeInventory': ['1.0'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'Deployment': ['1.0'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.11'],
+ 'Portgroup': ['1.5'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
}
# NOTE(xek): Assign each named release to the appropriate semver.
@@ -525,9 +546,9 @@ RELEASE_MAPPING = {
#
# There should be at most two named mappings here.
-# NOTE(mgoddard): remove xena prior to the zed release.
-RELEASE_MAPPING['xena'] = RELEASE_MAPPING['18.2']
+# NOTE(mgoddard): remove yoga prior to the antelope release.
RELEASE_MAPPING['yoga'] = RELEASE_MAPPING['20.1']
+RELEASE_MAPPING['zed'] = RELEASE_MAPPING['21.1']
# List of available versions with named versions first; 'master' is excluded.
RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) - {'master'}, reverse=True)
diff --git a/ironic/common/rpc.py b/ironic/common/rpc.py
index 285ee1f06..710c7a943 100644
--- a/ironic/common/rpc.py
+++ b/ironic/common/rpc.py
@@ -122,10 +122,9 @@ def get_transport_url(url_str=None):
def get_client(target, version_cap=None, serializer=None):
assert TRANSPORT is not None
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer)
+ return messaging.get_rpc_client(
+ TRANSPORT, target, version_cap=version_cap,
+ serializer=serializer)
def get_server(target, endpoints, serializer=None):
diff --git a/ironic/common/rpc_service.py b/ironic/common/rpc_service.py
index b0eec7758..cb0f23c98 100644
--- a/ironic/common/rpc_service.py
+++ b/ironic/common/rpc_service.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import signal
import sys
import time
@@ -24,6 +25,7 @@ from oslo_log import log
import oslo_messaging as messaging
from oslo_service import service
from oslo_utils import importutils
+from oslo_utils import timeutils
from ironic.common import context
from ironic.common import rpc
@@ -93,6 +95,26 @@ class RPCService(service.Service):
'transport': CONF.rpc_transport})
def stop(self):
+ initial_time = timeutils.utcnow()
+ extend_time = initial_time + datetime.timedelta(
+ seconds=CONF.hash_ring_reset_interval)
+
+ try:
+ self.manager.del_host(deregister=self.deregister)
+ except Exception as e:
+ LOG.exception('Service error occurred when cleaning up '
+ 'the RPC manager. Error: %s', e)
+
+ if self.manager.get_online_conductor_count() > 1:
+ # Delay stopping the server until the hash ring has been
+ # reset on the cluster
+ stop_time = timeutils.utcnow()
+ if stop_time < extend_time:
+ stop_wait = max(0, (extend_time - stop_time).seconds)
+ LOG.info('Waiting %(stop_wait)s seconds for hash ring reset.',
+ {'stop_wait': stop_wait})
+ time.sleep(stop_wait)
+
try:
if self.rpcserver is not None:
self.rpcserver.stop()
@@ -100,11 +122,6 @@ class RPCService(service.Service):
except Exception as e:
LOG.exception('Service error occurred when stopping the '
'RPC server. Error: %s', e)
- try:
- self.manager.del_host(deregister=self.deregister)
- except Exception as e:
- LOG.exception('Service error occurred when cleaning up '
- 'the RPC manager. Error: %s', e)
super(RPCService, self).stop(graceful=True)
LOG.info('Stopped RPC server for service %(service)s on host '
diff --git a/ironic/common/states.py b/ironic/common/states.py
index 89b710189..f2238b41b 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -269,6 +269,9 @@ _FASTTRACK_LOOKUP_ALLOWED_STATES = (ENROLL, MANAGEABLE, AVAILABLE,
FASTTRACK_LOOKUP_ALLOWED_STATES = frozenset(_FASTTRACK_LOOKUP_ALLOWED_STATES)
"""States where API lookups are permitted with fast track enabled."""
+FAILURE_STATES = frozenset((DEPLOYFAIL, CLEANFAIL, INSPECTFAIL,
+ RESCUEFAIL, UNRESCUEFAIL, ADOPTFAIL))
+
##############
# Power states
diff --git a/ironic/common/swift.py b/ironic/common/swift.py
index 8a98c32d2..87cda4fad 100644
--- a/ironic/common/swift.py
+++ b/ironic/common/swift.py
@@ -111,6 +111,31 @@ class SwiftAPI(object):
return obj_uuid
+ def create_object_from_data(self, object, data, container):
+ """Uploads a given string to Swift.
+
+ :param object: The name of the object in Swift
+ :param data: string data to put in the object
+ :param container: The name of the container for the object.
+ Defaults to the value set in the configuration options.
+ :returns: The Swift UUID of the object
+ :raises: utils.Error, if any operation with Swift fails.
+ """
+ try:
+ self.connection.put_container(container)
+ except swift_exceptions.ClientException as e:
+ operation = _("put container")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ try:
+ obj_uuid = self.connection.create_object(
+ container, object, data=data)
+ except swift_exceptions.ClientException as e:
+ operation = _("put object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ return obj_uuid
+
def get_temp_url(self, container, obj, timeout):
"""Returns the temp url for the given Swift object.
@@ -143,6 +168,23 @@ class SwiftAPI(object):
(parse_result.scheme, parse_result.netloc, url_path,
None, None, None))
+ def get_object(self, object, container):
+ """Downloads a given object from Swift.
+
+ :param object: The name of the object in Swift
+ :param container: The name of the container for the object.
+ Defaults to the value set in the configuration options.
+ :returns: Swift object
+ :raises: utils.Error, if the Swift operation fails.
+ """
+ try:
+ obj = self.connection.download_object(object, container=container)
+ except swift_exceptions.ClientException as e:
+ operation = _("get object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ return obj
+
def delete_object(self, container, obj):
"""Deletes the given Swift object.
diff --git a/ironic/common/utils.py b/ironic/common/utils.py
index e4d83c9b6..9ae88d4d6 100644
--- a/ironic/common/utils.py
+++ b/ironic/common/utils.py
@@ -681,3 +681,18 @@ def is_fips_enabled():
except Exception:
pass
return False
+
+
+def stop_after_retries(option, group=None):
+ """A tenacity retry helper that stops after retries specified in conf."""
+ # NOTE(dtantsur): fetch the option inside of the nested call, otherwise it
+ # cannot be changed in runtime.
+ def should_stop(retry_state):
+ if group:
+ conf = getattr(CONF, group)
+ else:
+ conf = CONF
+ num_retries = getattr(conf, option)
+ return retry_state.attempt_number >= num_retries + 1
+
+ return should_stop
diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py
index aa684408f..5c2e4ea95 100644
--- a/ironic/conductor/base_manager.py
+++ b/ironic/conductor/base_manager.py
@@ -88,10 +88,14 @@ class BaseConductorManager(object):
# clear all locks held by this conductor before registering
self.dbapi.clear_node_reservations_for_conductor(self.host)
- def init_host(self, admin_context=None):
+ def init_host(self, admin_context=None, start_consoles=True,
+ start_allocations=True):
"""Initialize the conductor host.
:param admin_context: the admin context to pass to periodic tasks.
+ :param start_consoles: If consoles should be started in initialization.
+ :param start_allocations: If allocations should be started in
+ initialization.
:raises: RuntimeError when conductor is already running.
:raises: NoDriversLoaded when no drivers are enabled on the conductor.
:raises: DriverNotFound if a driver is enabled that does not exist.
@@ -189,8 +193,9 @@ class BaseConductorManager(object):
# Start consoles if it set enabled in a greenthread.
try:
- self._spawn_worker(self._start_consoles,
- ironic_context.get_admin_context())
+ if start_consoles:
+ self._spawn_worker(self._start_consoles,
+ ironic_context.get_admin_context())
except exception.NoFreeConductorWorker:
LOG.warning('Failed to start worker for restarting consoles.')
@@ -207,8 +212,9 @@ class BaseConductorManager(object):
# Resume allocations that started before the restart.
try:
- self._spawn_worker(self._resume_allocations,
- ironic_context.get_admin_context())
+ if start_allocations:
+ self._spawn_worker(self._resume_allocations,
+ ironic_context.get_admin_context())
except exception.NoFreeConductorWorker:
LOG.warning('Failed to start worker for resuming allocations.')
@@ -328,6 +334,10 @@ class BaseConductorManager(object):
self._started = False
+ def get_online_conductor_count(self):
+ """Return a count of currently online conductors"""
+ return len(self.dbapi.get_online_conductors())
+
def _register_and_validate_hardware_interfaces(self, hardware_types):
"""Register and validate hardware interfaces for this conductor.
@@ -539,6 +549,7 @@ class BaseConductorManager(object):
try:
with task_manager.acquire(context, node_uuid, shared=False,
purpose='start console') as task:
+
notify_utils.emit_console_notification(
task, 'console_restore',
obj_fields.NotificationStatus.START)
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
index e3151d4b8..9e4edb809 100644
--- a/ironic/conductor/cleaning.py
+++ b/ironic/conductor/cleaning.py
@@ -69,7 +69,7 @@ def do_node_clean(task, clean_steps=None, disable_ramdisk=False):
task.driver.power.validate(task)
if not disable_ramdisk:
task.driver.network.validate(task)
- except exception.InvalidParameterValue as e:
+ except (exception.InvalidParameterValue, exception.NetworkError) as e:
msg = (_('Validation of node %(node)s for cleaning failed: %(msg)s') %
{'node': node.uuid, 'msg': e})
return utils.cleaning_error_handler(task, msg)
@@ -114,8 +114,9 @@ def do_node_clean(task, clean_steps=None, disable_ramdisk=False):
try:
conductor_steps.set_node_cleaning_steps(
task, disable_ramdisk=disable_ramdisk)
- except (exception.InvalidParameterValue,
- exception.NodeCleaningFailure) as e:
+ except Exception as e:
+ # Catch all exceptions and follow the error handling
+ # path so things are cleaned up properly.
msg = (_('Cannot clean node %(node)s: %(msg)s')
% {'node': node.uuid, 'msg': e})
return utils.cleaning_error_handler(task, msg)
@@ -247,12 +248,21 @@ def do_next_clean_step(task, step_index, disable_ramdisk=None):
task.process_event(event)
+def get_last_error(node):
+ last_error = _('By request, the clean operation was aborted')
+ if node.clean_step:
+ last_error += (
+ _(' during or after the completion of step "%s"')
+ % conductor_steps.step_id(node.clean_step)
+ )
+ return last_error
+
+
@task_manager.require_exclusive_lock
-def do_node_clean_abort(task, step_name=None):
+def do_node_clean_abort(task):
"""Internal method to abort an ongoing operation.
:param task: a TaskManager instance with an exclusive lock
- :param step_name: The name of the clean step.
"""
node = task.node
try:
@@ -270,12 +280,13 @@ def do_node_clean_abort(task, step_name=None):
set_fail_state=False)
return
+ last_error = get_last_error(node)
info_message = _('Clean operation aborted for node %s') % node.uuid
- last_error = _('By request, the clean operation was aborted')
- if step_name:
- msg = _(' after the completion of step "%s"') % step_name
- last_error += msg
- info_message += msg
+ if node.clean_step:
+ info_message += (
+ _(' during or after the completion of step "%s"')
+ % node.clean_step
+ )
node.last_error = last_error
node.clean_step = None
@@ -317,7 +328,7 @@ def continue_node_clean(task):
target_state = None
task.process_event('fail', target_state=target_state)
- do_node_clean_abort(task, step_name)
+ do_node_clean_abort(task)
return
LOG.debug('The cleaning operation for node %(node)s was '
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 13d11d1d9..74e3192cf 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -73,6 +73,7 @@ from ironic.conf import CONF
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
+from ironic.drivers.modules import inspect_utils
from ironic import objects
from ironic.objects import base as objects_base
from ironic.objects import fields
@@ -97,6 +98,8 @@ class ConductorManager(base_manager.BaseConductorManager):
def __init__(self, host, topic):
super(ConductorManager, self).__init__(host, topic)
+ # NOTE(TheJulia): This is less a metric-able count, but a means to
+ # sort out nodes and prioritise a subset (of non-responding nodes).
self.power_state_sync_count = collections.defaultdict(int)
@METRICS.timer('ConductorManager._clean_up_caches')
@@ -886,7 +889,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.NodeInMaintenance,
exception.InstanceDeployFailure,
exception.InvalidStateRequested,
- exception.NodeProtected)
+ exception.NodeProtected,
+ exception.ConcurrentActionLimit)
def do_node_deploy(self, context, node_id, rebuild=False,
configdrive=None, deploy_steps=None):
"""RPC method to initiate deployment to a node.
@@ -910,8 +914,11 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: InvalidStateRequested when the requested state is not a valid
target from the current state.
:raises: NodeProtected if the node is protected.
+ :raises: ConcurrentActionLimit if this action would exceed the maximum
+ number of configured concurrent actions of this type.
"""
LOG.debug("RPC do_node_deploy called for node %s.", node_id)
+ self._concurrent_action_limit(action='provisioning')
event = 'rebuild' if rebuild else 'deploy'
# NOTE(comstud): If the _sync_power_states() periodic task happens
@@ -983,7 +990,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.NodeLocked,
exception.InstanceDeployFailure,
exception.InvalidStateRequested,
- exception.NodeProtected)
+ exception.NodeProtected,
+ exception.ConcurrentActionLimit)
def do_node_tear_down(self, context, node_id):
"""RPC method to tear down an existing node deployment.
@@ -998,8 +1006,11 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: InvalidStateRequested when the requested state is not a valid
target from the current state.
:raises: NodeProtected if the node is protected.
+ :raises: ConcurrentActionLimit if this action would exceed the maximum
+ number of configured concurrent actions of this type.
"""
LOG.debug("RPC do_node_tear_down called for node %s.", node_id)
+ self._concurrent_action_limit(action='unprovisioning')
with task_manager.acquire(context, node_id, shared=False,
purpose='node tear down') as task:
@@ -1121,7 +1132,8 @@ class ConductorManager(base_manager.BaseConductorManager):
exception.InvalidStateRequested,
exception.NodeInMaintenance,
exception.NodeLocked,
- exception.NoFreeConductorWorker)
+ exception.NoFreeConductorWorker,
+ exception.ConcurrentActionLimit)
def do_node_clean(self, context, node_id, clean_steps,
disable_ramdisk=False):
"""RPC method to initiate manual cleaning.
@@ -1150,7 +1162,10 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: NodeLocked if node is locked by another conductor.
:raises: NoFreeConductorWorker when there is no free worker to start
async task.
+ :raises: ConcurrentActionLimit If this action would exceed the
+ configured limits of the deployment.
"""
+ self._concurrent_action_limit(action='cleaning')
with task_manager.acquire(context, node_id, shared=False,
purpose='node manual cleaning') as task:
node = task.node
@@ -1336,7 +1351,8 @@ class ConductorManager(base_manager.BaseConductorManager):
callback=self._spawn_worker,
call_args=(cleaning.do_node_clean_abort, task),
err_handler=utils.provisioning_error_handler,
- target_state=target_state)
+ target_state=target_state,
+ last_error=cleaning.get_last_error(node))
return
if node.provision_state == states.RESCUEWAIT:
@@ -1420,6 +1436,11 @@ class ConductorManager(base_manager.BaseConductorManager):
finally:
waiters.wait_for_all(futures)
+ # report a count of the nodes
+ METRICS.send_gauge(
+ 'ConductorManager.PowerSyncNodesCount',
+ len(nodes))
+
def _sync_power_state_nodes_task(self, context, nodes):
"""Invokes power state sync on nodes from synchronized queue.
@@ -1438,6 +1459,7 @@ class ConductorManager(base_manager.BaseConductorManager):
can do here to avoid failing a brand new deploy to a node that
we've locked here, though.
"""
+
# FIXME(comstud): Since our initial state checks are outside
# of the lock (to try to avoid the lock), some checks are
# repeated after grabbing the lock so we can unlock quickly.
@@ -1484,6 +1506,12 @@ class ConductorManager(base_manager.BaseConductorManager):
LOG.info("During sync_power_state, node %(node)s was not "
"found and presumed deleted by another process.",
{'node': node_uuid})
+ # TODO(TheJulia): The chance exists that we orphan a node
+ # in power_state_sync_count, albeit it is not much data,
+ # it could eventually cause the memory footprint to grow
+ # on an exceptionally large ironic deployment. We should
+ # make sure we clean it up at some point, but overall given
+ # minimal impact, it is definite low hanging fruit.
except exception.NodeLocked:
LOG.info("During sync_power_state, node %(node)s was "
"already locked by another process. Skip.",
@@ -1500,6 +1528,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# regular power state checking, maintenance is still a required
# condition.
filters={'maintenance': True, 'fault': faults.POWER_FAILURE},
+ node_count_metric_name='ConductorManager.PowerSyncRecoveryNodeCount',
)
def _power_failure_recovery(self, task, context):
"""Periodic task to check power states for nodes in maintenance.
@@ -1761,10 +1790,6 @@ class ConductorManager(base_manager.BaseConductorManager):
if task.node.console_enabled:
notify_utils.emit_console_notification(
task, 'console_restore', fields.NotificationStatus.START)
- # NOTE(kaifeng) Clear allocated_ipmi_terminal_port if exists,
- # so current conductor can allocate a new free port from local
- # resources.
- task.node.del_driver_internal_info('allocated_ipmi_terminal_port')
try:
task.driver.console.start_console(task)
except Exception as err:
@@ -1846,6 +1871,7 @@ class ConductorManager(base_manager.BaseConductorManager):
predicate=lambda n, m: n.conductor_affinity != m.conductor.id,
limit=lambda: CONF.conductor.periodic_max_workers,
shared_task=False,
+ node_count_metric_name='ConductorManager.SyncLocalStateNodeCount',
)
def _sync_local_state(self, task, context):
"""Perform any actions necessary to sync local state.
@@ -2011,6 +2037,26 @@ class ConductorManager(base_manager.BaseConductorManager):
node.console_enabled = False
notify_utils.emit_console_notification(
task, 'console_set', fields.NotificationStatus.END)
+ # Destroy Swift Inventory entries for this node
+ try:
+ inspect_utils.clean_up_swift_entries(task)
+ except exception.SwiftObjectStillExists as e:
+ if node.maintenance:
+ # Maintenance -> Allow orphaning
+ LOG.warning('Swift object orphaned during destruction of '
+ 'node %(node)s: %(e)s',
+ {'node': node.uuid, 'e': e})
+ else:
+ LOG.error('Swift object cannot be orphaned without '
+ 'maintenance mode during destruction of node '
+ '%(node)s: %(e)s', {'node': node.uuid, 'e': e})
+ raise
+ except Exception as err:
+ LOG.error('Failed to delete Swift entries related '
+ 'to the node %(node)s: %(err)s.',
+ {'node': node.uuid, 'err': err})
+ raise
+
node.destroy()
LOG.info('Successfully deleted node %(node)s.',
{'node': node.uuid})
@@ -2191,18 +2237,16 @@ class ConductorManager(base_manager.BaseConductorManager):
"""
LOG.debug('RPC set_console_mode called for node %(node)s with '
'enabled %(enabled)s', {'node': node_id, 'enabled': enabled})
-
- with task_manager.acquire(context, node_id, shared=False,
+ with task_manager.acquire(context, node_id, shared=True,
purpose='setting console mode') as task:
node = task.node
-
task.driver.console.validate(task)
-
if enabled == node.console_enabled:
op = 'enabled' if enabled else 'disabled'
LOG.info("No console action was triggered because the "
"console is already %s", op)
else:
+ task.upgrade_lock()
node.last_error = None
node.save()
task.spawn_after(self._spawn_worker,
@@ -2613,14 +2657,63 @@ class ConductorManager(base_manager.BaseConductorManager):
# Yield on every iteration
eventlet.sleep(0)
+ def _sensors_conductor(self, context):
+ """Called to collect and send metrics "sensors" for the conductor."""
+ # populate the message which will be sent to ceilometer
+ # or other data consumer
+ message = {'message_id': uuidutils.generate_uuid(),
+ 'timestamp': datetime.datetime.utcnow(),
+ 'hostname': self.host}
+
+ try:
+ ev_type = 'ironic.metrics'
+ message['event_type'] = ev_type + '.update'
+ sensors_data = METRICS.get_metrics_data()
+ except AttributeError:
+ # TODO(TheJulia): Remove this at some point, but right now
+ # don't inherently break on version mismatches when people
+ # disregard requirements.
+ LOG.warning(
+ 'get_sensors_data has been configured to collect '
+ 'conductor metrics, however the installed ironic-lib '
+ 'library lacks the functionality. Please update '
+ 'ironic-lib to a minimum of version 5.4.0.')
+ except Exception as e:
+ LOG.exception(
+ "An unknown error occured while attempting to collect "
+ "sensor data from within the conductor. Error: %(error)s",
+ {'error': e})
+ else:
+ message['payload'] = (
+ self._filter_out_unsupported_types(sensors_data))
+ if message['payload']:
+ self.sensors_notifier.info(
+ context, ev_type, message)
+
@METRICS.timer('ConductorManager._send_sensor_data')
- @periodics.periodic(spacing=CONF.conductor.send_sensor_data_interval,
- enabled=CONF.conductor.send_sensor_data)
+ @periodics.periodic(spacing=CONF.sensor_data.interval,
+ enabled=CONF.sensor_data.send_sensor_data)
def _send_sensor_data(self, context):
"""Periodically collects and transmits sensor data notifications."""
+ if CONF.sensor_data.enable_for_conductor:
+ if CONF.sensor_data.workers == 1:
+ # Directly call the sensors_conductor when only one
+ # worker is permitted, so we collect data serially
+ # instead.
+ self._sensors_conductor(context)
+ else:
+ # Also, do not apply the general threshold limit to
+ # the self collection of "sensor" data from the conductor,
+ # as were not launching external processes, we're just reading
+ # from an internal data structure, if we can.
+ self._spawn_worker(self._sensors_conductor, context)
+ if not CONF.sensor_data.enable_for_nodes:
+ # NOTE(TheJulia): If node sensor data is not required, then
+ # skip the rest of this method.
+ return
filters = {}
- if not CONF.conductor.send_sensor_data_for_undeployed_nodes:
+ if not CONF.sensor_data.enable_for_undeployed_nodes:
filters['provision_state'] = states.ACTIVE
nodes = queue.Queue()
@@ -2628,7 +2721,7 @@ class ConductorManager(base_manager.BaseConductorManager):
filters=filters):
nodes.put_nowait(node_info)
- number_of_threads = min(CONF.conductor.send_sensor_data_workers,
+ number_of_threads = min(CONF.sensor_data.workers,
nodes.qsize())
futures = []
for thread_number in range(number_of_threads):
@@ -2644,7 +2737,7 @@ class ConductorManager(base_manager.BaseConductorManager):
break
done, not_done = waiters.wait_for_all(
- futures, timeout=CONF.conductor.send_sensor_data_wait_timeout)
+ futures, timeout=CONF.sensor_data.wait_timeout)
if not_done:
LOG.warning("%d workers for send sensors data did not complete",
len(not_done))
@@ -2653,13 +2746,14 @@ class ConductorManager(base_manager.BaseConductorManager):
"""Filters out sensor data types that aren't specified in the config.
Removes sensor data types that aren't specified in
- CONF.conductor.send_sensor_data_types.
+ CONF.sensor_data.data_types.
:param sensors_data: dict containing sensor types and the associated
data
:returns: dict with unsupported sensor types removed
"""
- allowed = set(x.lower() for x in CONF.conductor.send_sensor_data_types)
+ allowed = set(x.lower() for x in
+ CONF.sensor_data.data_types)
if 'all' in allowed:
return sensors_data
@@ -3457,7 +3551,6 @@ class ConductorManager(base_manager.BaseConductorManager):
self.conductor.id):
# Another conductor has taken over, skipping
continue
-
LOG.debug('Taking over allocation %s', allocation.uuid)
allocations.do_allocate(context, allocation)
except Exception:
@@ -3549,6 +3642,40 @@ class ConductorManager(base_manager.BaseConductorManager):
# impact DB access if done in excess.
eventlet.sleep(0)
+ def _concurrent_action_limit(self, action):
+ """Check Concurrency limits and block operations if needed.
+
+ This method is used to serve as a central place for the logic
+ for checks on concurrency limits. If a limit is reached, then
+ an appropriate exception is raised.
+
+ :raises: ConcurrentActionLimit If the system configuration
+ is exceeded.
+ """
+ # NOTE(TheJulia): Keeping this all in one place for simplicity.
+ if action == 'provisioning':
+ node_count = self.dbapi.count_nodes_in_provision_state([
+ states.DEPLOYING,
+ states.DEPLOYWAIT
+ ])
+ if node_count >= CONF.conductor.max_concurrent_deploy:
+ raise exception.ConcurrentActionLimit(
+ task_type=action)
+
+ if action == 'unprovisioning' or action == 'cleaning':
+ # NOTE(TheJulia): This also checks for the deleting state
+ # which is super transitory, *but* you can get a node into
+ # the state. So in order to guard against a DoS attack, we
+ # need to check even the super transitory node state.
+ node_count = self.dbapi.count_nodes_in_provision_state([
+ states.DELETING,
+ states.CLEANING,
+ states.CLEANWAIT
+ ])
+ if node_count >= CONF.conductor.max_concurrent_clean:
+ raise exception.ConcurrentActionLimit(
+ task_type=action)
+
@METRICS.timer('get_vendor_passthru_metadata')
def get_vendor_passthru_metadata(route_dict):
diff --git a/ironic/conductor/periodics.py b/ironic/conductor/periodics.py
index 70bc7bc93..b9c8f8844 100644
--- a/ironic/conductor/periodics.py
+++ b/ironic/conductor/periodics.py
@@ -18,6 +18,7 @@ import inspect
import eventlet
from futurist import periodics
+from ironic_lib import metrics_utils
from oslo_log import log
from ironic.common import exception
@@ -29,6 +30,9 @@ from ironic.drivers import base as driver_base
LOG = log.getLogger(__name__)
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+
def periodic(spacing, enabled=True, **kwargs):
"""A decorator to define a periodic task.
@@ -46,7 +50,7 @@ class Stop(Exception):
def node_periodic(purpose, spacing, enabled=True, filters=None,
predicate=None, predicate_extra_fields=(), limit=None,
- shared_task=True):
+ shared_task=True, node_count_metric_name=None):
"""A decorator to define a periodic task to act on nodes.
Defines a periodic task that fetches the list of nodes mapped to the
@@ -84,6 +88,9 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
iteration to determine the limit.
:param shared_task: if ``True``, the task will have a shared lock. It is
recommended to start with a shared lock and upgrade it only if needed.
+ :param node_count_metric_name: A string value to identify a metric
+ representing the count of matching nodes to be recorded upon the
+ completion of the periodic.
"""
node_type = collections.namedtuple(
'Node',
@@ -116,10 +123,11 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
else:
local_limit = limit
assert local_limit is None or local_limit > 0
-
+ node_count = 0
nodes = manager.iter_nodes(filters=filters,
fields=predicate_extra_fields)
for (node_uuid, *other) in nodes:
+ node_count += 1
if predicate is not None:
node = node_type(node_uuid, *other)
if accepts_manager:
@@ -158,6 +166,11 @@ def node_periodic(purpose, spacing, enabled=True, filters=None,
local_limit -= 1
if not local_limit:
return
+ if node_count_metric_name:
+ # Send post-run metrics.
+ METRICS.send_gauge(
+ node_count_metric_name,
+ node_count)
return wrapper
diff --git a/ironic/conductor/steps.py b/ironic/conductor/steps.py
index 252b094a9..5a0fdd7b4 100644
--- a/ironic/conductor/steps.py
+++ b/ironic/conductor/steps.py
@@ -194,9 +194,9 @@ def _get_cleaning_steps(task, enabled=False, sort=True):
sort_step_key=sort_key,
prio_overrides=csp_override)
- LOG.debug("cleaning_steps after applying "
- "clean_step_priority_override for node %(node)s: %(step)s",
- task.node.uuid, cleaning_steps)
+ LOG.debug('cleaning_steps after applying '
+ 'clean_step_priority_override for node %(node)s: %(steps)s',
+ {'node': task.node.uuid, 'steps': cleaning_steps})
else:
cleaning_steps = _get_steps(task, CLEANING_INTERFACE_PRIORITY,
'get_clean_steps', enabled=enabled,
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 509c9ce92..922e74cf6 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -527,7 +527,8 @@ class TaskManager(object):
self.release_resources()
def process_event(self, event, callback=None, call_args=None,
- call_kwargs=None, err_handler=None, target_state=None):
+ call_kwargs=None, err_handler=None, target_state=None,
+ last_error=None):
"""Process the given event for the task's current state.
:param event: the name of the event to process
@@ -540,6 +541,8 @@ class TaskManager(object):
prev_target_state)
:param target_state: if specified, the target provision state for the
node. Otherwise, use the target state from the fsm
+ :param last_error: last error to set on the node together with
+ the state transition.
:raises: InvalidState if the event is not allowed by the associated
state machine
"""
@@ -572,13 +575,15 @@ class TaskManager(object):
# set up the async worker
if callback:
- # clear the error if we're going to start work in a callback
- self.node.last_error = None
+ # update the error if we're going to start work in a callback
+ self.node.last_error = last_error
if call_args is None:
call_args = ()
if call_kwargs is None:
call_kwargs = {}
self.spawn_after(callback, *call_args, **call_kwargs)
+ elif last_error is not None:
+ self.node.last_error = last_error
# publish the state transition by saving the Node
self.node.save()
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index c107f076f..cdf3a99ee 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -302,9 +302,11 @@ def node_power_action(task, new_state, timeout=None):
# Set the target_power_state and clear any last_error, if we're
# starting a new operation. This will expose to other processes
- # and clients that work is in progress.
- node['target_power_state'] = target_state
- node['last_error'] = None
+ # and clients that work is in progress. Keep the last_error intact
+ # if the power action happens as a result of a failure.
+ node.target_power_state = target_state
+ if node.provision_state not in states.FAILURE_STATES:
+ node.last_error = None
node.timestamp_driver_internal_info('last_power_state_change')
# NOTE(dtantsur): wipe token on shutting down, otherwise a reboot in
# fast-track (or an accidentally booted agent) will cause subsequent
diff --git a/ironic/conf/__init__.py b/ironic/conf/__init__.py
index 4e4b7bf7a..648395362 100644
--- a/ironic/conf/__init__.py
+++ b/ironic/conf/__init__.py
@@ -27,12 +27,15 @@ from ironic.conf import database
from ironic.conf import default
from ironic.conf import deploy
from ironic.conf import dhcp
+from ironic.conf import dnsmasq
from ironic.conf import drac
+from ironic.conf import fake
from ironic.conf import glance
from ironic.conf import healthcheck
from ironic.conf import ibmc
from ironic.conf import ilo
from ironic.conf import inspector
+from ironic.conf import inventory
from ironic.conf import ipmi
from ironic.conf import irmc
from ironic.conf import metrics
@@ -42,6 +45,7 @@ from ironic.conf import neutron
from ironic.conf import nova
from ironic.conf import pxe
from ironic.conf import redfish
+from ironic.conf import sensor_data
from ironic.conf import service_catalog
from ironic.conf import snmp
from ironic.conf import swift
@@ -62,11 +66,14 @@ default.register_opts(CONF)
deploy.register_opts(CONF)
drac.register_opts(CONF)
dhcp.register_opts(CONF)
+dnsmasq.register_opts(CONF)
+fake.register_opts(CONF)
glance.register_opts(CONF)
healthcheck.register_opts(CONF)
ibmc.register_opts(CONF)
ilo.register_opts(CONF)
inspector.register_opts(CONF)
+inventory.register_opts(CONF)
ipmi.register_opts(CONF)
irmc.register_opts(CONF)
metrics.register_opts(CONF)
@@ -76,6 +83,7 @@ neutron.register_opts(CONF)
nova.register_opts(CONF)
pxe.register_opts(CONF)
redfish.register_opts(CONF)
+sensor_data.register_opts(CONF)
service_catalog.register_opts(CONF)
snmp.register_opts(CONF)
swift.register_opts(CONF)
diff --git a/ironic/conf/anaconda.py b/ironic/conf/anaconda.py
index 8ae3ab533..4f230ecdc 100644
--- a/ironic/conf/anaconda.py
+++ b/ironic/conf/anaconda.py
@@ -28,6 +28,17 @@ opts = [
help=_('kickstart template to use when no kickstart template '
'is specified in the instance_info or the glance OS '
'image.')),
+ cfg.BoolOpt('insecure_heartbeat',
+ default=False,
+ mutable=True,
+ help=_('Option to allow the kickstart configuration to be '
+                          'informed if SSL/TLS certificate verification should '
+ 'be enforced, or not. This option exists largely to '
+ 'facilitate easy testing and use of the ``anaconda`` '
+ 'deployment interface. When this option is set, '
+ 'heartbeat operations, depending on the contents of '
+                          'the utilized kickstart template, may not enforce TLS '
+ 'certificate verification.')),
]
diff --git a/ironic/conf/api.py b/ironic/conf/api.py
index 2b0e9a824..cf59fa006 100644
--- a/ironic/conf/api.py
+++ b/ironic/conf/api.py
@@ -86,6 +86,11 @@ opts = [
'network_data_schema',
default='$pybasedir/api/controllers/v1/network-data-schema.json',
help=_("Schema for network data used by this deployment.")),
+ cfg.BoolOpt('project_admin_can_manage_own_nodes',
+ default=True,
+ mutable=True,
+ help=_('If a project scoped administrative user is permitted '
+                       'to create/delete baremetal nodes in their project.')),
]
opt_group = cfg.OptGroup(name='api',
diff --git a/ironic/conf/conductor.py b/ironic/conf/conductor.py
index b1d6bae4f..653e30f56 100644
--- a/ironic/conf/conductor.py
+++ b/ironic/conf/conductor.py
@@ -97,41 +97,6 @@ opts = [
cfg.IntOpt('node_locked_retry_interval',
default=1,
help=_('Seconds to sleep between node lock attempts.')),
- cfg.BoolOpt('send_sensor_data',
- default=False,
- help=_('Enable sending sensor data message via the '
- 'notification bus')),
- cfg.IntOpt('send_sensor_data_interval',
- default=600,
- min=1,
- help=_('Seconds between conductor sending sensor data message '
- 'to ceilometer via the notification bus.')),
- cfg.IntOpt('send_sensor_data_workers',
- default=4, min=1,
- help=_('The maximum number of workers that can be started '
- 'simultaneously for send data from sensors periodic '
- 'task.')),
- cfg.IntOpt('send_sensor_data_wait_timeout',
- default=300,
- help=_('The time in seconds to wait for send sensors data '
- 'periodic task to be finished before allowing periodic '
- 'call to happen again. Should be less than '
- 'send_sensor_data_interval value.')),
- cfg.ListOpt('send_sensor_data_types',
- default=['ALL'],
- help=_('List of comma separated meter types which need to be'
- ' sent to Ceilometer. The default value, "ALL", is a '
- 'special value meaning send all the sensor data.')),
- cfg.BoolOpt('send_sensor_data_for_undeployed_nodes',
- default=False,
- help=_('The default for sensor data collection is to only '
- 'collect data for machines that are deployed, however '
- 'operators may desire to know if there are failures '
- 'in hardware that is not presently in use. '
- 'When set to true, the conductor will collect sensor '
- 'information from all nodes when sensor data '
- 'collection is enabled via the send_sensor_data '
- 'setting.')),
cfg.IntOpt('sync_local_state_interval',
default=180,
help=_('When conductors join or leave the cluster, existing '
@@ -358,6 +323,32 @@ opts = [
'model. The conductor does *not* record this value '
'otherwise, and this information is not backfilled '
'for prior instances which have been deployed.')),
+ cfg.IntOpt('max_concurrent_deploy',
+ default=250,
+ min=1,
+ mutable=True,
+ help=_('The maximum number of concurrent nodes in deployment '
+ 'which are permitted in this Ironic system. '
+ 'If this limit is reached, new requests will be '
+ 'rejected until the number of deployments in progress '
+ 'is lower than this maximum. As this is a security '
+ 'mechanism requests are not queued, and this setting '
+ 'is a global setting applying to all requests this '
+ 'conductor receives, regardless of access rights. '
+ 'The concurrent deployment limit cannot be disabled.')),
+ cfg.IntOpt('max_concurrent_clean',
+ default=50,
+ min=1,
+ mutable=True,
+ help=_('The maximum number of concurrent nodes in cleaning '
+ 'which are permitted in this Ironic system. '
+ 'If this limit is reached, new requests will be '
+ 'rejected until the number of nodes in cleaning '
+ 'is lower than this maximum. As this is a security '
+ 'mechanism requests are not queued, and this setting '
+ 'is a global setting applying to all requests this '
+ 'conductor receives, regardless of access rights. '
+ 'The concurrent clean limit cannot be disabled.')),
]
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index 0e3c32bd1..c7aff69cc 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -216,7 +216,7 @@ image_opts = [
'common/isolinux_config.template'),
help=_('Template file for isolinux configuration file.')),
cfg.StrOpt('grub_config_path',
- default='/boot/grub/grub.cfg',
+ default='EFI/BOOT/grub.cfg',
help=_('GRUB2 configuration file location on the UEFI ISO '
'images produced by ironic. The default value is '
'usually incorrect and should not be relied on. '
diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py
index 6ae080c83..ff2020105 100644
--- a/ironic/conf/deploy.py
+++ b/ironic/conf/deploy.py
@@ -108,7 +108,7 @@ opts = [
'state. If True, shred will be invoked and cleaning '
'will continue.')),
cfg.IntOpt('disk_erasure_concurrency',
- default=1,
+ default=4,
min=1,
mutable=True,
help=_('Defines the target pool size used by Ironic Python '
@@ -133,9 +133,7 @@ opts = [
'to set an explicit value for this option, and if the '
'setting or default differs from nodes, to ensure that '
'nodes are configured specifically for their desired '
- 'boot mode. This option '
- 'only has effect when management interface supports '
- 'boot mode management') % {
+ 'boot mode.') % {
'bios': boot_modes.LEGACY_BIOS,
'uefi': boot_modes.UEFI}),
cfg.BoolOpt('configdrive_use_object_store',
diff --git a/ironic/conf/dhcp.py b/ironic/conf/dhcp.py
index 2c58529fd..17a937f7d 100644
--- a/ironic/conf/dhcp.py
+++ b/ironic/conf/dhcp.py
@@ -20,7 +20,8 @@ from ironic.common.i18n import _
opts = [
cfg.StrOpt('dhcp_provider',
default='neutron',
- help=_('DHCP provider to use. "neutron" uses Neutron, and '
+ help=_('DHCP provider to use. "neutron" uses Neutron, '
+ '"dnsmasq" uses the Dnsmasq provider, and '
'"none" uses a no-op provider.')),
]
diff --git a/ironic/conf/dnsmasq.py b/ironic/conf/dnsmasq.py
new file mode 100644
index 000000000..f1ba1de23
--- /dev/null
+++ b/ironic/conf/dnsmasq.py
@@ -0,0 +1,43 @@
+#
+# Copyright 2022 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.StrOpt('dhcp_optsdir',
+ default='/etc/dnsmasq.d/optsdir.d',
+ help=_('Directory where the "dnsmasq" provider will write '
+ 'option configuration files for an external '
+ 'Dnsmasq to read. Use the same path for the '
+ 'dhcp-optsdir dnsmasq configuration directive.')),
+ cfg.StrOpt('dhcp_hostsdir',
+ default='/etc/dnsmasq.d/hostsdir.d',
+ help=_('Directory where the "dnsmasq" provider will write '
+ 'host configuration files for an external '
+ 'Dnsmasq to read. Use the same path for the '
+ 'dhcp-hostsdir dnsmasq configuration directive.')),
+ cfg.StrOpt('dhcp_leasefile',
+ default='/var/lib/dnsmasq/dnsmasq.leases',
+ help=_('Dnsmasq leases file for the "dnsmasq" driver to '
+                        'discover IP addresses of managed nodes. Use the '
+ 'same path for the dhcp-leasefile dnsmasq '
+ 'configuration directive.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='dnsmasq')
diff --git a/ironic/conf/fake.py b/ironic/conf/fake.py
new file mode 100644
index 000000000..8f6d75ee3
--- /dev/null
+++ b/ironic/conf/fake.py
@@ -0,0 +1,85 @@
+#
+# Copyright 2022 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.StrOpt('power_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'power driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('boot_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'boot driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('deploy_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'deploy driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('vendor_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'vendor driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('management_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'management driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('inspect_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'inspect driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('raid_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'raid driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('bios_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'bios driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('storage_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'storage driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+ cfg.StrOpt('rescue_delay',
+ default='0',
+ help=_('Delay in seconds for operations with the fake '
+ 'rescue driver. Two comma-delimited values will '
+ 'result in a delay with a triangular random '
+ 'distribution, weighted on the first value.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='fake')
diff --git a/ironic/conf/glance.py b/ironic/conf/glance.py
index a3286b1eb..317f213bc 100644
--- a/ironic/conf/glance.py
+++ b/ironic/conf/glance.py
@@ -114,6 +114,7 @@ opts = [
'will determine how many containers are created.')),
cfg.IntOpt('num_retries',
default=0,
+ mutable=True,
help=_('Number of retries when downloading an image from '
'glance.')),
]
diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py
index 364c64c81..197378ce7 100644
--- a/ironic/conf/ilo.py
+++ b/ironic/conf/ilo.py
@@ -120,6 +120,11 @@ opts = [
'/proc/cmdline. Mind severe cmdline size limit! Can be '
'overridden by `instance_info/kernel_append_params` '
'property.')),
+ cfg.StrOpt('cert_path',
+ default='/var/lib/ironic/ilo/',
+ help=_('On the ironic-conductor node, directory where ilo '
+ 'driver stores the CSR and the cert.')),
+
]
diff --git a/ironic/conf/inventory.py b/ironic/conf/inventory.py
new file mode 100644
index 000000000..52f31bf60
--- /dev/null
+++ b/ironic/conf/inventory.py
@@ -0,0 +1,34 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.StrOpt('data_backend',
+ help=_('The storage backend for storing introspection data.'),
+ choices=[('none', _('introspection data will not be stored')),
+ ('database', _('introspection data stored in an SQL '
+ 'database')),
+ ('swift', _('introspection data stored in Swift'))],
+ default='database'),
+ cfg.StrOpt('swift_data_container',
+ default='introspection_data_container',
+ help=_('The Swift introspection data container to store '
+ 'the inventory data.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='inventory')
diff --git a/ironic/conf/irmc.py b/ironic/conf/irmc.py
index 7c319e2d8..68ee43b3a 100644
--- a/ironic/conf/irmc.py
+++ b/ironic/conf/irmc.py
@@ -81,9 +81,20 @@ opts = [
help='SNMP polling interval in seconds'),
cfg.StrOpt('snmp_auth_proto',
default='sha',
- choices=[('sha', _('Secure Hash Algorithm 1'))],
+ choices=[('sha', _('Secure Hash Algorithm 1, supported in iRMC '
+ 'S4 and S5.')),
+ ('sha256', ('Secure Hash Algorithm 2 with 256 bits '
+ 'digest, only supported in iRMC S6.')),
+ ('sha384', ('Secure Hash Algorithm 2 with 384 bits '
+ 'digest, only supported in iRMC S6.')),
+ ('sha512', ('Secure Hash Algorithm 2 with 512 bits '
+ 'digest, only supported in iRMC S6.'))],
help=_("SNMPv3 message authentication protocol ID. "
- "Required for version 'v3'. 'sha' is supported.")),
+ "Required for version 'v3'. The valid options are "
+ "'sha', 'sha256', 'sha384' and 'sha512', while 'sha' is "
+ "the only supported protocol in iRMC S4 and S5, and "
+ "from iRMC S6, 'sha256', 'sha384' and 'sha512' are "
+ "supported, but 'sha' is not supported any more.")),
cfg.StrOpt('snmp_priv_proto',
default='aes',
choices=[('aes', _('Advanced Encryption Standard'))],
diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py
index 7576264ee..a7ebcfb30 100644
--- a/ironic/conf/opts.py
+++ b/ironic/conf/opts.py
@@ -32,6 +32,7 @@ _opts = [
('healthcheck', ironic.conf.healthcheck.opts),
('ilo', ironic.conf.ilo.opts),
('inspector', ironic.conf.inspector.list_opts()),
+ ('inventory', ironic.conf.inventory.opts),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('anaconda', ironic.conf.anaconda.opts),
@@ -42,6 +43,7 @@ _opts = [
('nova', ironic.conf.nova.list_opts()),
('pxe', ironic.conf.pxe.opts),
('redfish', ironic.conf.redfish.opts),
+ ('sensor_data', ironic.conf.sensor_data.opts),
('service_catalog', ironic.conf.service_catalog.list_opts()),
('snmp', ironic.conf.snmp.opts),
('swift', ironic.conf.swift.list_opts()),
@@ -88,5 +90,8 @@ def update_opt_defaults():
'openstack=WARNING',
# Policy logging is not necessarily useless, but very verbose
'oslo_policy=WARNING',
+ # Concurrency lock logging is not bad, but exceptionally noisy
+ # and typically not needed in debugging Ironic itself.
+ 'oslo_concurrency.lockutils=WARNING',
]
)
diff --git a/ironic/conf/sensor_data.py b/ironic/conf/sensor_data.py
new file mode 100644
index 000000000..8527113a6
--- /dev/null
+++ b/ironic/conf/sensor_data.py
@@ -0,0 +1,89 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from ironic.common.i18n import _
+
+opts = [
+ cfg.BoolOpt('send_sensor_data',
+ default=False,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data',
+ help=_('Enable sending sensor data message via the '
+ 'notification bus.')),
+ cfg.IntOpt('interval',
+ default=600,
+ min=1,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_interval',
+ help=_('Seconds between conductor sending sensor data message '
+ 'via the notification bus. This was originally for '
+ 'consumption via ceilometer, but the data may also '
+ 'be consumed via a plugin like '
+ 'ironic-prometheus-exporter or any other message bus '
+ 'data collector.')),
+ cfg.IntOpt('workers',
+ default=4, min=1,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_workers',
+ help=_('The maximum number of workers that can be started '
+ 'simultaneously for send data from sensors periodic '
+ 'task.')),
+ cfg.IntOpt('wait_timeout',
+ default=300,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_wait_timeout',
+ help=_('The time in seconds to wait for send sensors data '
+ 'periodic task to be finished before allowing periodic '
+ 'call to happen again. Should be less than '
+ 'send_sensor_data_interval value.')),
+ cfg.ListOpt('data_types',
+ default=['ALL'],
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_types',
+ help=_('List of comma separated meter types which need to be '
+ 'sent to Ceilometer. The default value, "ALL", is a '
+ 'special value meaning send all the sensor data. '
+ 'This setting only applies to baremetal sensor data '
+ 'being processed through the conductor.')),
+ cfg.BoolOpt('enable_for_undeployed_nodes',
+ default=False,
+ deprecated_group='conductor',
+ deprecated_name='send_sensor_data_for_undeployed_nodes',
+ help=_('The default for sensor data collection is to only '
+ 'collect data for machines that are deployed, however '
+ 'operators may desire to know if there are failures '
+ 'in hardware that is not presently in use. '
+ 'When set to true, the conductor will collect sensor '
+ 'information from all nodes when sensor data '
+ 'collection is enabled via the send_sensor_data '
+ 'setting.')),
+ cfg.BoolOpt('enable_for_conductor',
+ default=True,
+                help=_('Whether to include sensor metric data for the Conductor '
+ 'process itself in the message payload for sensor '
+ 'data which allows operators to gather instance '
+ 'counts of actions and states to better manage '
+ 'the deployment.')),
+ cfg.BoolOpt('enable_for_nodes',
+ default=True,
+                help=_('Whether to transmit any sensor data for any nodes under '
+                       'this conductor\'s management. This option supersedes '
+ 'the ``send_sensor_data_for_undeployed_nodes`` '
+ 'setting.')),
+]
+
+
+def register_opts(conf):
+ conf.register_opts(opts, group='sensor_data')
diff --git a/ironic/db/api.py b/ironic/db/api.py
index 712919bb3..42839aa74 100644
--- a/ironic/db/api.py
+++ b/ironic/db/api.py
@@ -72,6 +72,7 @@ class Connection(object, metaclass=abc.ABCMeta):
:reserved_by_any_of: [conductor1, conductor2]
:resource_class: resource class name
:retired: True | False
+ :shard_in: shard (multiple possibilities)
:provision_state: provision state of node
:provision_state_in:
provision state of node (multiple possibilities)
@@ -106,6 +107,7 @@ class Connection(object, metaclass=abc.ABCMeta):
:provisioned_before:
nodes with provision_updated_at field before this
interval in seconds
+ :shard: nodes with the given shard
:param limit: Maximum number of nodes to return.
:param marker: the last item of the previous page; we return the next
result set.
@@ -295,6 +297,14 @@ class Connection(object, metaclass=abc.ABCMeta):
"""
@abc.abstractmethod
+ def get_ports_by_shards(self, shards, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of ports contained in the provided shards.
+
+        :param shards: A list of shards to filter ports by.
+ """
+
+ @abc.abstractmethod
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""List all the ports for a given node.
@@ -1416,3 +1426,41 @@ class Connection(object, metaclass=abc.ABCMeta):
:param entires: A list of node history entriy id's to be
queried for deletion.
"""
+
+ @abc.abstractmethod
+ def count_nodes_in_provision_state(self, state):
+ """Count the number of nodes in given provision state.
+
+ :param state: A provision_state value to match for the
+ count operation. This can be a single provision
+ state value or a list of values.
+ """
+
+ @abc.abstractmethod
+ def create_node_inventory(self, values):
+ """Create a new inventory record.
+
+ :param values: Dict of values.
+ """
+
+ @abc.abstractmethod
+ def destroy_node_inventory_by_node_id(self, inventory_node_id):
+        """Destroy an inventory record.
+
+        :param inventory_node_id: The integer node ID of the inventory record
+ """
+
+ @abc.abstractmethod
+ def get_node_inventory_by_node_id(self, node_id):
+ """Get the node inventory for a given node.
+
+ :param node_id: The integer node ID.
+ :returns: An inventory of a node.
+ """
+
+ @abc.abstractmethod
+ def get_shard_list(self):
+ """Retrieve a list of shards.
+
+ :returns: list of dicts containing shard names and count
+ """
diff --git a/ironic/db/sqlalchemy/__init__.py b/ironic/db/sqlalchemy/__init__.py
index 0f792361a..173b91fcc 100644
--- a/ironic/db/sqlalchemy/__init__.py
+++ b/ironic/db/sqlalchemy/__init__.py
@@ -12,5 +12,7 @@
from oslo_db.sqlalchemy import enginefacade
+# FIXME(stephenfin): we need to remove reliance on autocommit semantics ASAP
+# since it's not compatible with SQLAlchemy 2.0
# NOTE(dtantsur): we want sqlite as close to a real database as possible.
-enginefacade.configure(sqlite_fk=True)
+enginefacade.configure(sqlite_fk=True, __autocommit=True)
diff --git a/ironic/db/sqlalchemy/alembic/versions/0ac0f39bc5aa_add_node_inventory_table.py b/ironic/db/sqlalchemy/alembic/versions/0ac0f39bc5aa_add_node_inventory_table.py
new file mode 100644
index 000000000..c6a12f6dd
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/0ac0f39bc5aa_add_node_inventory_table.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""add node inventory table
+
+Revision ID: 0ac0f39bc5aa
+Revises: 9ef41f07cb58
+Create Date: 2022-10-25 17:15:38.181544
+
+"""
+
+from alembic import op
+from oslo_db.sqlalchemy import types as db_types
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '0ac0f39bc5aa'
+down_revision = '9ef41f07cb58'
+
+
+def upgrade():
+ op.create_table('node_inventory',
+ sa.Column('version', sa.String(length=15), nullable=True),
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('inventory_data', db_types.JsonEncodedDict(
+ mysql_as_long=True).impl, nullable=True),
+ sa.Column('plugin_data', db_types.JsonEncodedDict(
+ mysql_as_long=True).impl, nullable=True),
+ sa.Column('node_id', sa.Integer(), nullable=True),
+ sa.PrimaryKeyConstraint('id'),
+ sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
+ sa.Index('inventory_node_id_idx', 'node_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3')
diff --git a/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py b/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
index d47a3d131..1587dc94a 100644
--- a/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
+++ b/ironic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
@@ -38,8 +38,8 @@ def upgrade():
sa.Column('drivers', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_charset='UTF8MB3',
+ mysql_engine='InnoDB',
)
op.create_table(
'chassis',
@@ -51,8 +51,8 @@ def upgrade():
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_table(
'nodes',
@@ -77,8 +77,8 @@ def upgrade():
sa.ForeignKeyConstraint(['chassis_id'], ['chassis.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'],
unique=False)
@@ -95,7 +95,7 @@ def upgrade():
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('address', name='uniq_ports0address'),
sa.UniqueConstraint('uuid', name='uniq_ports0uuid'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
# end Alembic commands
diff --git a/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py b/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
index 0b5e8ff10..0cdc38fb2 100644
--- a/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
+++ b/ironic/db/sqlalchemy/alembic/versions/2aac7e0872f6_add_deploy_templates.py
@@ -39,8 +39,8 @@ def upgrade():
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_deploytemplates0uuid'),
sa.UniqueConstraint('name', name='uniq_deploytemplates0name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_table(
@@ -62,6 +62,6 @@ def upgrade():
sa.Index('deploy_template_id', 'deploy_template_id'),
sa.Index('deploy_template_steps_interface_idx', 'interface'),
sa.Index('deploy_template_steps_step_idx', 'step'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
diff --git a/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py b/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
index 641419f09..b0e12e56b 100644
--- a/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
+++ b/ironic/db/sqlalchemy/alembic/versions/48d6c242bb9b_add_node_tags.py
@@ -36,7 +36,7 @@ def upgrade():
sa.Column('tag', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'tag'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_tags_idx', 'node_tags', ['tag'], unique=False)
diff --git a/ironic/db/sqlalchemy/alembic/versions/4dbec778866e_create_node_shard.py b/ironic/db/sqlalchemy/alembic/versions/4dbec778866e_create_node_shard.py
new file mode 100644
index 000000000..a446da701
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/4dbec778866e_create_node_shard.py
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""create node.shard
+
+Revision ID: 4dbec778866e
+Revises: 0ac0f39bc5aa
+Create Date: 2022-11-10 14:20:59.175355
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '4dbec778866e'
+down_revision = '0ac0f39bc5aa'
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('shard', sa.String(length=255),
+ nullable=True))
+ op.create_index('shard_idx', 'nodes', ['shard'], unique=False)
diff --git a/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py b/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
index a799c1b1d..7b1eacbe0 100644
--- a/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
+++ b/ironic/db/sqlalchemy/alembic/versions/5ea1b0d310e_added_port_group_table_and_altered_ports.py
@@ -42,8 +42,8 @@ def upgrade():
sa.UniqueConstraint('address',
name='uniq_portgroups0address'),
sa.UniqueConstraint('name', name='uniq_portgroups0name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8')
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3')
op.add_column(u'ports', sa.Column('local_link_connection', sa.Text(),
nullable=True))
op.add_column(u'ports', sa.Column('portgroup_id', sa.Integer(),
diff --git a/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py b/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
index 0d93bed30..33c141caa 100644
--- a/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
+++ b/ironic/db/sqlalchemy/alembic/versions/82c315d60161_add_bios_settings.py
@@ -37,6 +37,6 @@ def upgrade():
sa.Column('version', sa.String(length=15), nullable=True),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'name'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
diff --git a/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py b/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
index 9f5b855ed..748d281e2 100644
--- a/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/9ef41f07cb58_add_node_history_table.py
@@ -48,5 +48,5 @@ def upgrade():
sa.Index('history_node_id_idx', 'node_id'),
sa.Index('history_uuid_idx', 'uuid'),
sa.Index('history_conductor_idx', 'conductor'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8')
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3')
diff --git a/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py b/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
index 8cf30a2d9..66216b722 100644
--- a/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/b4130a7fc904_create_nodetraits_table.py
@@ -37,7 +37,7 @@ def upgrade():
sa.Column('trait', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('node_id', 'trait'),
- mysql_ENGINE='InnoDB',
- mysql_DEFAULT_CHARSET='UTF8'
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
)
op.create_index('node_traits_idx', 'node_traits', ['trait'], unique=False)
diff --git a/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
index 55560dc68..74ab297a5 100644
--- a/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
+++ b/ironic/db/sqlalchemy/alembic/versions/dd67b91a1981_add_allocations_table.py
@@ -48,7 +48,10 @@ def upgrade():
sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name', name='uniq_allocations0name'),
- sa.UniqueConstraint('uuid', name='uniq_allocations0uuid')
+ sa.UniqueConstraint('uuid', name='uniq_allocations0uuid'),
+ mysql_engine='InnoDB',
+ mysql_charset='UTF8MB3'
+
)
op.add_column('nodes', sa.Column('allocation_id', sa.Integer(),
nullable=True))
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index 05d5cc45e..d5f4a9d65 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -19,9 +19,11 @@ import datetime
import json
import threading
+from oslo_concurrency import lockutils
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import orm as sa_orm
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import netutils
@@ -30,8 +32,8 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import sqlalchemy as osp_sqlalchemy
import sqlalchemy as sa
-from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
-from sqlalchemy.orm import joinedload
+from sqlalchemy import or_
+from sqlalchemy.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.orm import Load
from sqlalchemy.orm import selectinload
from sqlalchemy import sql
@@ -52,6 +54,10 @@ LOG = log.getLogger(__name__)
_CONTEXT = threading.local()
+
+RESERVATION_SEMAPHORE = "reserve_node_db_lock"
+synchronized = lockutils.synchronized_with_prefix('ironic-')
+
# NOTE(mgoddard): We limit the number of traits per node to 50 as this is the
# maximum number of traits per resource provider allowed in placement.
MAX_TRAITS_PER_NODE = 50
@@ -79,104 +85,43 @@ def _wrap_session(session):
return session
-def _get_node_query_with_all_for_single_node():
- """Return a query object for the Node joined with all relevant fields.
+def _get_node_select():
+ """Returns a SQLAlchemy Select Object for Nodes.
- This method utilizes a joined load query which creates a result set
- where corresponding traits, and tags, are joined together in the result
- set.
+ This method returns a pre-formatted select object which models
+ the entire Node object, allowing callers to operate on a node like
+ they would have with an SQLAlchemy ORM Query Object.
- This is more efficent from a Queries Per Second standpoint with the
- backend database, as they are not separate distinct queries which
- are being executed by the client.
+ This object *also* performs two additional select queries, in the form
+ of a selectin operation, to achieve the same results of a Join query,
+ but without the join query itself, and the client side load.
- The downside of this, is the relationship of tags and traits to nodes
- is that there may be multiple tags and traits for each node. Ultimately
- this style of query forces SQLAlchemy to de-duplicate the result set
- because the database returns the nodes portion of the result set for
- every trait, tag, or other table field the query is joined with.
- This looks like:
+ This method is best utilized when retrieving lists of nodes.
- node1, tag1, trait1
- node1, tag1, trait2
- node1, tag1, trait3
- node1, tag2, trait1
+ Select objects in this fashion were added as a result of SQLAlchemy 1.4
+ in preparation for SQLAlchemy 2.0's release to provide a unified
+ select interface.
- Et cetra, to create:
+ :returns: a select object
+ """
- node1, [tag1, tag2], [trait1, trait 2, trait3]
+ # NOTE(TheJulia): This returns a query in the SQLAlchemy 1.4->2.0
+ # migration style as query model loading is deprecated.
- Where joins are super in-efficent for Ironic, is where nodes are being
- enumerated, as the above result set pattern is not just for one node, but
- potentially thousands of nodes. In that case, we should use the
- _get_node_query_with_all_for_list helper to return a more appropriate
- query object which will be more efficient for the end user.
+ # This must use selectinload to avoid a later need to invoke deduplication.
+ return (sa.select(models.Node)
+ .options(selectinload(models.Node.tags),
+ selectinload(models.Node.traits)))
- :returns: a query object.
- """
- # NOTE(TheJulia): This *likely* ought to be selectinload, however
- # it is a very common hit pattern for Ironic to query just the node.
- # In those sorts of locations, the performance issues are less noticable
- # to end users. *IF/WHEN* we change it to be selectinload for nodes,
- # the resulting DB load will see a queries per second increase, which
- # we should be careful about.
-
- # NOTE(TheJulia): Basic benchmark difference
- # Test data creation: 67.202 seconds.
- # 2.43 seconds to obtain all nodes from SQLAlchemy (10k nodes)
- # 5.15 seconds to obtain all nodes *and* have node objects (10k nodes)
- return (model_query(models.Node)
- .options(joinedload('tags'))
- .options(joinedload('traits')))
-
-
-def _get_node_query_with_all_for_list():
- """Return a query object for the Node with queried extra fields.
-
- This method returns a query object joining tags and traits in a pattern
- where the result set is first built, and then the resulting associations
- are queried separately and the objects are reconciled by SQLAlchemy to
- build the composite objects based upon the associations.
-
- This results in the following query pattern when the query is executed:
-
- select $fields from nodes where x;
- # SQLAlchemy creates a list of associated node IDs.
- select $fields from tags where node_id in ('1', '3', '37268');
- select $fields from traits where node_id in ('1', '3', '37268');
-
- SQLAlchemy then returns a result set where the tags and traits are
- composited together efficently as opposed to having to deduplicate
- the result set. This shifts additional load to the database which
- was previously a high overhead operation with-in the conductor...
- which results in a slower conductor.
-
- :returns: a query object.
- """
- # NOTE(TheJulia): When comparing CI rubs *with* this being the default
- # for all general list operations, at 10k nodes, this pattern appears
- # to be on-par with a 5% variability between the two example benchmark
- # tests. That being said, the test *does* not include tags or traits
- # in it's test data set so client side deduplication is not measured.
-
- # NOTE(TheJulia): Basic benchmark difference
- # tests data creation: 67.117 seconds
- # 2.32 seconds to obtain all nodes from SQLAlchemy (10k nodes)
- # 4.99 seconds to obtain all nodes *and* have node objects (10k nodes)
- # If this holds true, the required record deduplication with joinedload
- # may be basically the same amount of overhead as requesting the tags
- # and traits separately.
- return (model_query(models.Node)
- .options(selectinload('tags'))
- .options(selectinload('traits')))
-
-
-def _get_deploy_template_query_with_steps():
- """Return a query object for the DeployTemplate joined with steps.
-
- :returns: a query object.
+
+def _get_deploy_template_select_with_steps():
+ """Return a select object for the DeployTemplate joined with steps.
+
+ :returns: a select object.
"""
- return model_query(models.DeployTemplate).options(joinedload('steps'))
+ return sa.select(
+ models.DeployTemplate
+ ).options(selectinload(models.DeployTemplate.steps))
def model_query(model, *args, **kwargs):
@@ -208,6 +153,26 @@ def add_identity_filter(query, value):
raise exception.InvalidIdentity(identity=value)
+def add_identity_where(op, model, value):
+ """Adds an identity filter to operation for where method.
+
+ Filters results by ID, if supplied value is a valid integer.
+ Otherwise attempts to filter results by UUID.
+
+ :param op: Initial operation to add filter to.
+ i.e. a update or delete statement.
+ :param model: The SQLAlchemy model to apply.
+ :param value: Value for filtering results by.
+ :return: Modified query.
+ """
+ if strutils.is_int_like(value):
+ return op.where(model.id == value)
+ elif uuidutils.is_uuid_like(value):
+ return op.where(model.uuid == value)
+ else:
+ raise exception.InvalidIdentity(identity=value)
+
+
def add_port_filter(query, value):
"""Adds a port-specific filter to a query.
@@ -280,7 +245,7 @@ def add_portgroup_filter(query, value):
if netutils.is_valid_mac(value):
return query.filter_by(address=value)
else:
- return add_identity_filter(query, value)
+ return add_identity_where(query, models.Portgroup, value)
def add_portgroup_filter_by_node(query, value):
@@ -331,9 +296,11 @@ def add_allocation_filter_by_conductor(query, value):
def _paginate_query(model, limit=None, marker=None, sort_key=None,
- sort_dir=None, query=None):
- if not query:
- query = model_query(model)
+ sort_dir=None, query=None, return_base_tuple=False):
+ # NOTE(TheJulia): We can't just ask for the bool of query if it is
+ # populated, so we need to ask if it is None.
+ if query is None:
+ query = sa.select(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
@@ -344,7 +311,34 @@ def _paginate_query(model, limit=None, marker=None, sort_key=None,
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
- return query.all()
+ with _session_for_read() as session:
+ # NOTE(TheJulia): SQLAlchemy 2.0 no longer returns pre-uniqued result
+ # sets in ORM mode, so we need to explicitly ask for it to be unique
+ # before returning it to the caller.
+ if isinstance(query, sa_orm.Query):
+ # The classic "Legacy" ORM query object result set which is
+ # deprecated in advance of SQLAlchemy 2.0.
+ # TODO(TheJulia): Calls of this style basically need to be
+ # eliminated in ironic as returning this way does not allow
+ # commit or rollback in enginefacade to occur until the returned
+ # object is garbage collected as ORM Query objects allow
+ # for DB interactions to occur after the fact, so it remains
+ # connected to the DB.
+ return query.all()
+ else:
+ # In this case, we have a sqlalchemy.sql.selectable.Select
+ # (most likely) which utilizes the unified select interface.
+ res = session.execute(query).fetchall()
+ if len(res) == 0:
+ # Return an empty list instead of a class with no objects.
+ return []
+ if return_base_tuple:
+ # The caller expects a tuple, lets just give it to them.
+ return res
+ # Everything is a tuple in a resultset from the unified interface
+ # but for objects, our model expects just object access,
+ # so we extract and return them.
+ return [r[0] for r in res]
def _filter_active_conductors(query, interval=None):
@@ -404,10 +398,11 @@ class Connection(api.Connection):
'uuid', 'id', 'fault', 'conductor_group',
'owner', 'lessee', 'instance_uuid'}
_NODE_IN_QUERY_FIELDS = {'%s_in' % field: field
- for field in ('uuid', 'provision_state')}
+ for field in ('uuid', 'provision_state', 'shard')}
_NODE_NON_NULL_FILTERS = {'associated': 'instance_uuid',
'reserved': 'reservation',
- 'with_power_state': 'power_state'}
+ 'with_power_state': 'power_state',
+ 'sharded': 'shard'}
_NODE_FILTERS = ({'chassis_uuid', 'reserved_by_any_of',
'provisioned_before', 'inspection_started_before',
'description_contains', 'project'}
@@ -513,15 +508,16 @@ class Connection(api.Connection):
else:
columns = [getattr(models.Node, c) for c in columns]
- query = model_query(*columns, base_model=models.Node)
+ query = sa.select(*columns)
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
- sort_key, sort_dir, query)
+ sort_key, sort_dir, query,
+ return_base_tuple=True)
def get_node_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None, fields=None):
if not fields:
- query = _get_node_query_with_all_for_list()
+ query = _get_node_select()
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
@@ -558,24 +554,25 @@ class Connection(api.Connection):
# with SQLAlchemy.
traits_found = True
use_columns.remove('traits')
-
# Generate the column object list so SQLAlchemy only fulfills
# the requested columns.
use_columns = [getattr(models.Node, c) for c in use_columns]
-
# In essence, traits (and anything else needed to generate the
# composite objects) need to be reconciled without using a join
# as multiple rows can be generated in the result set being returned
# from the database server. In this case, with traits, we use
# a selectinload pattern.
if traits_found:
- query = model_query(models.Node).options(
- Load(models.Node).load_only(*use_columns),
- selectinload(models.Node.traits))
+ query = sa.select(models.Node).options(
+ selectinload(models.Node.traits),
+ Load(models.Node).load_only(*use_columns)
+ )
else:
- query = model_query(models.Node).options(
- Load(models.Node).load_only(*use_columns))
-
+ # Note for others, if you ask for a whole model, it is
+ # modeled, i.e. you can access it as an object.
+ query = sa.select(models.NodeBase).options(
+ Load(models.Node).load_only(*use_columns)
+ )
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
@@ -596,19 +593,20 @@ class Connection(api.Connection):
raise exception.NodeNotFound(
_("Nodes cannot be found: %s") % ', '.join(missing))
- query = model_query(models.Node.uuid, models.Node.name).filter(
- sql.or_(models.Node.uuid.in_(uuids),
- models.Node.name.in_(names))
- )
- if project:
- query = query.filter((models.Node.owner == project)
- | (models.Node.lessee == project))
+ with _session_for_read() as session:
+ query = session.query(models.Node.uuid, models.Node.name).filter(
+ sql.or_(models.Node.uuid.in_(uuids),
+ models.Node.name.in_(names))
+ )
+ if project:
+ query = query.filter((models.Node.owner == project)
+ | (models.Node.lessee == project))
- for row in query:
- if row[0] in idents:
- mapping[row[0]] = row[0]
- if row[1] and row[1] in idents:
- mapping[row[1]] = row[0]
+ for row in query:
+ if row[0] in idents:
+ mapping[row[0]] = row[0]
+ if row[1] and row[1] in idents:
+ mapping[row[1]] = row[0]
missing = idents - set(mapping)
if missing:
@@ -617,40 +615,85 @@ class Connection(api.Connection):
return mapping
+ @synchronized(RESERVATION_SEMAPHORE, fair=True)
+ def _reserve_node_place_lock(self, tag, node_id, node):
+ try:
+ # NOTE(TheJulia): We explicitly do *not* synch the session
+ # so the other actions in the conductor do not become aware
+ # that the lock is in place and believe they hold the lock.
+ # This necessitates an overall lock in the code side, so
+ # we avoid conditions where two separate threads can believe
+ # they hold locks at the same time.
+ with _session_for_write() as session:
+ res = session.execute(
+ sa.update(models.Node).
+ where(models.Node.id == node.id).
+ where(models.Node.reservation == None). # noqa
+ values(reservation=tag).
+ execution_options(synchronize_session=False))
+ session.flush()
+ node = self._get_node_by_id_no_joins(node.id)
+ # NOTE(TheJulia): In SQLAlchemy 2.0 style, we don't
+ # magically get a changed node as they moved from the
+ # many ways to do things to singular ways to do things.
+ if res.rowcount != 1:
+ # Nothing updated and node exists. Must already be
+ # locked.
+ raise exception.NodeLocked(node=node.uuid,
+ host=node.reservation)
+ except NoResultFound:
+ # In the event that someone has deleted the node on
+ # another thread.
+ raise exception.NodeNotFound(node=node_id)
+
@oslo_db_api.retry_on_deadlock
def reserve_node(self, tag, node_id):
- with _session_for_write():
- query = _get_node_query_with_all_for_single_node()
- query = add_identity_filter(query, node_id)
- count = query.filter_by(reservation=None).update(
- {'reservation': tag}, synchronize_session=False)
+ with _session_for_read() as session:
try:
+ # TODO(TheJulia): Figure out a good way to query
+ # this so that we do it as light as possible without
+ # the full object invocation, which will speed lock
+ # activities. Granted, this is all at the DB level
+ # so maybe that is okay in the grand scheme of things.
+ query = session.query(models.Node)
+ query = add_identity_filter(query, node_id)
node = query.one()
- if count != 1:
- # Nothing updated and node exists. Must already be
- # locked.
- raise exception.NodeLocked(node=node.uuid,
- host=node['reservation'])
- return node
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
+ if node.reservation:
+ # Fail fast, instead of attempt the update.
+ raise exception.NodeLocked(node=node.uuid,
+ host=node.reservation)
+ self._reserve_node_place_lock(tag, node_id, node)
+ # Return a node object as that is the contract for this method.
+ return self.get_node_by_id(node.id)
@oslo_db_api.retry_on_deadlock
def release_node(self, tag, node_id):
- with _session_for_write():
- query = model_query(models.Node)
- query = add_identity_filter(query, node_id)
- # be optimistic and assume we usually release a reservation
- count = query.filter_by(reservation=tag).update(
- {'reservation': None}, synchronize_session=False)
+ with _session_for_read() as session:
+ try:
+ query = session.query(models.Node)
+ query = add_identity_filter(query, node_id)
+ node = query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+ with _session_for_write() as session:
try:
- if count != 1:
- node = query.one()
- if node['reservation'] is None:
+ res = session.execute(
+ sa.update(models.Node).
+ where(models.Node.id == node.id).
+ where(models.Node.reservation == tag).
+ values(reservation=None).
+ execution_options(synchronize_session=False)
+ )
+ node = self.get_node_by_id(node.id)
+ if res.rowcount != 1:
+ if node.reservation is None:
raise exception.NodeNotLocked(node=node.uuid)
else:
raise exception.NodeLocked(node=node.uuid,
host=node['reservation'])
+ session.flush()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
@@ -676,47 +719,68 @@ class Connection(api.Connection):
node = models.Node()
node.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(node)
+ # Set tags & traits to [] for new created node
+ # NOTE(mgoddard): We need to set the tags and traits fields in
+ # the session context, otherwise SQLAlchemy will try and fail
+ # to lazy load the attributes, resulting in an exception being
+ # raised.
+ node['tags'] = []
+ node['traits'] = []
session.flush()
- except db_exc.DBDuplicateEntry as exc:
- if 'name' in exc.columns:
- raise exception.DuplicateName(name=values['name'])
- elif 'instance_uuid' in exc.columns:
- raise exception.InstanceAssociated(
- instance_uuid=values['instance_uuid'],
- node=values['uuid'])
- raise exception.NodeAlreadyExists(uuid=values['uuid'])
- # Set tags & traits to [] for new created node
- # NOTE(mgoddard): We need to set the tags and traits fields in the
- # session context, otherwise SQLAlchemy will try and fail to lazy
- # load the attributes, resulting in an exception being raised.
- node['tags'] = []
- node['traits'] = []
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.DuplicateName(name=values['name'])
+ elif 'instance_uuid' in exc.columns:
+ raise exception.InstanceAssociated(
+ instance_uuid=values['instance_uuid'],
+ node=values['uuid'])
+ raise exception.NodeAlreadyExists(uuid=values['uuid'])
return node
+ def _get_node_by_id_no_joins(self, node_id):
+ # TODO(TheJulia): Maybe replace with this with a minimal
+ # "get these three fields" thing.
+ try:
+ with _session_for_read() as session:
+ # Explicitly load NodeBase as the invocation of the
+ # primary model object results in the join query
+ # triggering.
+ return session.execute(
+ sa.select(models.NodeBase).filter_by(id=node_id).limit(1)
+ ).scalars().first()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+
def get_node_by_id(self, node_id):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(id=node_id)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(id=node_id).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, node_uuid):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(uuid=node_uuid)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(uuid=node_uuid).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_uuid)
def get_node_by_name(self, node_name):
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(name=node_name)
try:
- return query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(name=node_name).limit(1)
+ ).unique().one()
except NoResultFound:
raise exception.NodeNotFound(node=node_name)
@@ -724,20 +788,19 @@ class Connection(api.Connection):
if not uuidutils.is_uuid_like(instance):
raise exception.InvalidUUID(uuid=instance)
- query = _get_node_query_with_all_for_single_node()
- query = query.filter_by(instance_uuid=instance)
-
try:
- result = query.one()
+ query = _get_node_select()
+ with _session_for_read() as session:
+ return session.scalars(
+ query.filter_by(instance_uuid=instance).limit(1)
+ ).unique().one()
except NoResultFound:
- raise exception.InstanceNotFound(instance=instance)
-
- return result
+ raise exception.InstanceNotFound(instance_uuid=instance)
@oslo_db_api.retry_on_deadlock
def destroy_node(self, node_id):
with _session_for_write() as session:
- query = model_query(models.Node)
+ query = session.query(models.Node)
query = add_identity_filter(query, node_id)
try:
@@ -755,47 +818,53 @@ class Connection(api.Connection):
if uuidutils.is_uuid_like(node_id):
node_id = node_ref['id']
- port_query = model_query(models.Port)
+ port_query = session.query(models.Port)
port_query = add_port_filter_by_node(port_query, node_id)
port_query.delete()
- portgroup_query = model_query(models.Portgroup)
+ portgroup_query = session.query(models.Portgroup)
portgroup_query = add_portgroup_filter_by_node(portgroup_query,
node_id)
portgroup_query.delete()
# Delete all tags attached to the node
- tag_query = model_query(models.NodeTag).filter_by(node_id=node_id)
+ tag_query = session.query(models.NodeTag).filter_by(
+ node_id=node_id)
tag_query.delete()
# Delete all traits attached to the node
- trait_query = model_query(
+ trait_query = session.query(
models.NodeTrait).filter_by(node_id=node_id)
trait_query.delete()
- volume_connector_query = model_query(
+ volume_connector_query = session.query(
models.VolumeConnector).filter_by(node_id=node_id)
volume_connector_query.delete()
- volume_target_query = model_query(
+ volume_target_query = session.query(
models.VolumeTarget).filter_by(node_id=node_id)
volume_target_query.delete()
# delete all bios attached to the node
- bios_settings_query = model_query(
+ bios_settings_query = session.query(
models.BIOSSetting).filter_by(node_id=node_id)
bios_settings_query.delete()
# delete all allocations for this node
- allocation_query = model_query(
+ allocation_query = session.query(
models.Allocation).filter_by(node_id=node_id)
allocation_query.delete()
# delete all history for this node
- history_query = model_query(
+ history_query = session.query(
models.NodeHistory).filter_by(node_id=node_id)
history_query.delete()
+ # delete all inventory for this node
+ inventory_query = session.query(
+ models.NodeInventory).filter_by(node_id=node_id)
+ inventory_query.delete()
+
query.delete()
def update_node(self, node_id, values):
@@ -820,10 +889,10 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def _do_update_node(self, node_id, values):
- with _session_for_write():
+ with _session_for_write() as session:
# NOTE(mgoddard): Don't issue a joined query for the update as this
# does not work with PostgreSQL.
- query = model_query(models.Node)
+ query = session.query(models.Node)
query = add_identity_filter(query, node_id)
try:
ref = query.with_for_update().one()
@@ -835,20 +904,26 @@ class Connection(api.Connection):
if values['provision_state'] == states.INSPECTING:
values['inspection_started_at'] = timeutils.utcnow()
values['inspection_finished_at'] = None
- elif (ref.provision_state == states.INSPECTING
+ elif ((ref.provision_state == states.INSPECTING
+ or ref.provision_state == states.INSPECTWAIT)
and values['provision_state'] == states.MANAGEABLE):
values['inspection_finished_at'] = timeutils.utcnow()
values['inspection_started_at'] = None
- elif (ref.provision_state == states.INSPECTING
+ elif ((ref.provision_state == states.INSPECTING
+ or ref.provision_state == states.INSPECTWAIT)
and values['provision_state'] == states.INSPECTFAIL):
values['inspection_started_at'] = None
ref.update(values)
- # Return the updated node model joined with all relevant fields.
- query = _get_node_query_with_all_for_single_node()
- query = add_identity_filter(query, node_id)
- return query.one()
+ # Return the updated node model joined with all relevant fields.
+ query = _get_node_select()
+ query = add_identity_filter(query, node_id)
+ # FIXME(TheJulia): This entire method needs to be re-written to
+ # use the proper execution format for SQLAlchemy 2.0. Likely
+ # A query, independent update, and a re-query on the transaction.
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
def get_port_by_id(self, port_id):
query = model_query(models.Port).filter_by(id=port_id)
@@ -885,7 +960,7 @@ class Connection(api.Connection):
def get_port_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
+ query = sa.select(models.Port)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -893,11 +968,22 @@ class Connection(api.Connection):
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
+ def get_ports_by_shards(self, shards, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ shard_node_ids = sa.select(models.Node) \
+ .where(models.Node.shard.in_(shards)) \
+ .with_only_columns(models.Node.id)
+ with _session_for_read() as session:
+ query = session.query(models.Port).filter(
+ models.Port.node_id.in_(shard_node_ids))
+ ports = _paginate_query(
+ models.Port, limit, marker, sort_key, sort_dir, query)
+ return ports
+
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
- query = query.filter_by(node_id=node_id)
+ query = sa.select(models.Port).where(models.Port.node_id == node_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -908,8 +994,8 @@ class Connection(api.Connection):
def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
sort_key=None, sort_dir=None, owner=None,
project=None):
- query = model_query(models.Port)
- query = query.filter_by(portgroup_id=portgroup_id)
+ query = sa.select(models.Port).where(
+ models.Port.portgroup_id == portgroup_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
elif project:
@@ -924,15 +1010,15 @@ class Connection(api.Connection):
port = models.Port()
port.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(port)
session.flush()
- except db_exc.DBDuplicateEntry as exc:
- if 'address' in exc.columns:
- raise exception.MACAlreadyExists(mac=values['address'])
- raise exception.PortAlreadyExists(uuid=values['uuid'])
- return port
+ except db_exc.DBDuplicateEntry as exc:
+ if 'address' in exc.columns:
+ raise exception.MACAlreadyExists(mac=values['address'])
+ raise exception.PortAlreadyExists(uuid=values['uuid'])
+ return port
@oslo_db_api.retry_on_deadlock
def update_port(self, port_id, values):
@@ -940,10 +1026,9 @@ class Connection(api.Connection):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Port.")
raise exception.InvalidParameterValue(err=msg)
-
try:
with _session_for_write() as session:
- query = model_query(models.Port)
+ query = session.query(models.Port)
query = add_port_filter(query, port_id)
ref = query.one()
ref.update(values)
@@ -959,8 +1044,8 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_port(self, port_id):
- with _session_for_write():
- query = model_query(models.Port)
+ with _session_for_write() as session:
+ query = session.query(models.Port)
query = add_port_filter(query, port_id)
count = query.delete()
if count == 0:
@@ -1000,7 +1085,7 @@ class Connection(api.Connection):
def get_portgroup_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, project=None):
- query = model_query(models.Portgroup)
+ query = sa.select(models.Portgroup)
if project:
query = add_portgroup_filter_by_node_project(query, project)
return _paginate_query(models.Portgroup, limit, marker,
@@ -1008,8 +1093,8 @@ class Connection(api.Connection):
def get_portgroups_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None, project=None):
- query = model_query(models.Portgroup)
- query = query.filter_by(node_id=node_id)
+ query = sa.select(models.Portgroup)
+ query = query.where(models.Portgroup.node_id == node_id)
if project:
query = add_portgroup_filter_by_node_project(query, project)
return _paginate_query(models.Portgroup, limit, marker,
@@ -1045,7 +1130,7 @@ class Connection(api.Connection):
with _session_for_write() as session:
try:
- query = model_query(models.Portgroup)
+ query = session.query(models.Portgroup)
query = add_portgroup_filter(query, portgroup_id)
ref = query.one()
ref.update(values)
@@ -1066,34 +1151,40 @@ class Connection(api.Connection):
def destroy_portgroup(self, portgroup_id):
def portgroup_not_empty(session):
"""Checks whether the portgroup does not have ports."""
-
- query = model_query(models.Port)
- query = add_port_filter_by_portgroup(query, portgroup_id)
-
- return query.count() != 0
+ with _session_for_read() as session:
+ return session.scalar(
+ sa.select(
+ sa.func.count(models.Port.id)
+ ).where(models.Port.portgroup_id == portgroup_id)) != 0
with _session_for_write() as session:
if portgroup_not_empty(session):
raise exception.PortgroupNotEmpty(portgroup=portgroup_id)
- query = model_query(models.Portgroup, session=session)
- query = add_identity_filter(query, portgroup_id)
+ query = sa.delete(models.Portgroup)
+ query = add_identity_where(query, models.Portgroup, portgroup_id)
- count = query.delete()
+ count = session.execute(query).rowcount
if count == 0:
raise exception.PortgroupNotFound(portgroup=portgroup_id)
def get_chassis_by_id(self, chassis_id):
- query = model_query(models.Chassis).filter_by(id=chassis_id)
+ query = sa.select(models.Chassis).where(
+ models.Chassis.id == chassis_id)
+
try:
- return query.one()
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_id)
def get_chassis_by_uuid(self, chassis_uuid):
- query = model_query(models.Chassis).filter_by(uuid=chassis_uuid)
+ query = sa.select(models.Chassis).where(
+ models.Chassis.uuid == chassis_uuid)
+
try:
- return query.one()
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.ChassisNotFound(chassis=chassis_uuid)
@@ -1109,13 +1200,13 @@ class Connection(api.Connection):
chassis = models.Chassis()
chassis.update(values)
- with _session_for_write() as session:
- try:
+ try:
+ with _session_for_write() as session:
session.add(chassis)
session.flush()
- except db_exc.DBDuplicateEntry:
- raise exception.ChassisAlreadyExists(uuid=values['uuid'])
- return chassis
+ except db_exc.DBDuplicateEntry:
+ raise exception.ChassisAlreadyExists(uuid=values['uuid'])
+ return chassis
@oslo_db_api.retry_on_deadlock
def update_chassis(self, chassis_id, values):
@@ -1124,9 +1215,9 @@ class Connection(api.Connection):
msg = _("Cannot overwrite UUID for an existing Chassis.")
raise exception.InvalidParameterValue(err=msg)
- with _session_for_write():
- query = model_query(models.Chassis)
- query = add_identity_filter(query, chassis_id)
+ with _session_for_write() as session:
+ query = session.query(models.Chassis)
+ query = add_identity_where(query, models.Chassis, chassis_id)
count = query.update(values)
if count != 1:
@@ -1136,19 +1227,14 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_chassis(self, chassis_id):
- def chassis_not_empty():
- """Checks whether the chassis does not have nodes."""
-
- query = model_query(models.Node)
+ with _session_for_write() as session:
+ query = session.query(models.Node)
query = add_node_filter_by_chassis(query, chassis_id)
- return query.count() != 0
-
- with _session_for_write():
- if chassis_not_empty():
+ if query.count() != 0:
raise exception.ChassisNotEmpty(chassis=chassis_id)
- query = model_query(models.Chassis)
+ query = session.query(models.Chassis)
query = add_identity_filter(query, chassis_id)
count = query.delete()
@@ -1158,7 +1244,7 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def register_conductor(self, values, update_existing=False):
with _session_for_write() as session:
- query = (model_query(models.Conductor)
+ query = (session.query(models.Conductor)
.filter_by(hostname=values['hostname']))
try:
ref = query.one()
@@ -1182,39 +1268,46 @@ class Connection(api.Connection):
def get_conductor(self, hostname, online=True):
try:
- query = model_query(models.Conductor).filter_by(hostname=hostname)
+ query = sa.select(models.Conductor).where(
+ models.Conductor.hostname == hostname)
if online is not None:
- query = query.filter_by(online=online)
- return query.one()
+ query = query.where(models.Conductor.online == online)
+ with _session_for_read() as session:
+ res = session.execute(query).one()[0]
+ return res
except NoResultFound:
raise exception.ConductorNotFound(conductor=hostname)
@oslo_db_api.retry_on_deadlock
def unregister_conductor(self, hostname):
- with _session_for_write():
- query = (model_query(models.Conductor)
- .filter_by(hostname=hostname, online=True))
- count = query.update({'online': False})
+ with _session_for_write() as session:
+ query = sa.update(models.Conductor).where(
+ models.Conductor.hostname == hostname,
+ models.Conductor.online == True).values( # noqa
+ online=False)
+ count = session.execute(query).rowcount
if count == 0:
raise exception.ConductorNotFound(conductor=hostname)
@oslo_db_api.retry_on_deadlock
def touch_conductor(self, hostname):
- with _session_for_write():
- query = (model_query(models.Conductor)
- .filter_by(hostname=hostname))
- # since we're not changing any other field, manually set updated_at
- # and since we're heartbeating, make sure that online=True
- count = query.update({'updated_at': timeutils.utcnow(),
- 'online': True})
- if count == 0:
- raise exception.ConductorNotFound(conductor=hostname)
+ with _session_for_write() as session:
+ query = sa.update(models.Conductor).where(
+ models.Conductor.hostname == hostname
+ ).values({
+ 'updated_at': timeutils.utcnow(),
+ 'online': True}
+ ).execution_options(synchronize_session=False)
+ res = session.execute(query)
+ count = res.rowcount
+ if count == 0:
+ raise exception.ConductorNotFound(conductor=hostname)
@oslo_db_api.retry_on_deadlock
def clear_node_reservations_for_conductor(self, hostname):
nodes = []
- with _session_for_write():
- query = (model_query(models.Node)
+ with _session_for_write() as session:
+ query = (session.query(models.Node)
.filter(models.Node.reservation.ilike(hostname)))
nodes = [node['uuid'] for node in query]
query.update({'reservation': None}, synchronize_session=False)
@@ -1228,8 +1321,8 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def clear_node_target_power_state(self, hostname):
nodes = []
- with _session_for_write():
- query = (model_query(models.Node)
+ with _session_for_write() as session:
+ query = (session.query(models.Node)
.filter(models.Node.reservation.ilike(hostname)))
query = query.filter(models.Node.target_power_state != sql.null())
nodes = [node['uuid'] for node in query]
@@ -1247,46 +1340,51 @@ class Connection(api.Connection):
'%(nodes)s', {'nodes': nodes})
def get_active_hardware_type_dict(self, use_groups=False):
- query = (model_query(models.ConductorHardwareInterfaces,
- models.Conductor)
- .join(models.Conductor))
- result = _filter_active_conductors(query)
-
- d2c = collections.defaultdict(set)
- for iface_row, cdr_row in result:
- hw_type = iface_row['hardware_type']
- if use_groups:
- key = '%s:%s' % (cdr_row['conductor_group'], hw_type)
- else:
- key = hw_type
- d2c[key].add(cdr_row['hostname'])
+ with _session_for_read() as session:
+ query = (session.query(models.ConductorHardwareInterfaces,
+ models.Conductor)
+ .join(models.Conductor))
+ result = _filter_active_conductors(query)
+
+ d2c = collections.defaultdict(set)
+ for iface_row, cdr_row in result:
+ hw_type = iface_row['hardware_type']
+ if use_groups:
+ key = '%s:%s' % (cdr_row['conductor_group'], hw_type)
+ else:
+ key = hw_type
+ d2c[key].add(cdr_row['hostname'])
return d2c
def get_offline_conductors(self, field='hostname'):
- field = getattr(models.Conductor, field)
- interval = CONF.conductor.heartbeat_timeout
- limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
- result = (model_query(field)
- .filter(models.Conductor.updated_at < limit))
- return [row[0] for row in result]
+ with _session_for_read() as session:
+ field = getattr(models.Conductor, field)
+ interval = CONF.conductor.heartbeat_timeout
+ limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
+ result = (session.query(field)
+ .filter(models.Conductor.updated_at < limit))
+ return [row[0] for row in result]
def get_online_conductors(self):
- query = model_query(models.Conductor.hostname)
- query = _filter_active_conductors(query)
- return [row[0] for row in query]
+ with _session_for_read() as session:
+ query = session.query(models.Conductor.hostname)
+ query = _filter_active_conductors(query)
+ return [row[0] for row in query]
def list_conductor_hardware_interfaces(self, conductor_id):
- query = (model_query(models.ConductorHardwareInterfaces)
- .filter_by(conductor_id=conductor_id))
- return query.all()
+ with _session_for_read() as session:
+ query = (session.query(models.ConductorHardwareInterfaces)
+ .filter_by(conductor_id=conductor_id))
+ return query.all()
def list_hardware_type_interfaces(self, hardware_types):
- query = (model_query(models.ConductorHardwareInterfaces)
- .filter(models.ConductorHardwareInterfaces.hardware_type
- .in_(hardware_types)))
+ with _session_for_read() as session:
+ query = (session.query(models.ConductorHardwareInterfaces)
+ .filter(models.ConductorHardwareInterfaces.hardware_type
+ .in_(hardware_types)))
- query = _filter_active_conductors(query)
- return query.all()
+ query = _filter_active_conductors(query)
+ return query.all()
@oslo_db_api.retry_on_deadlock
def register_conductor_hardware_interfaces(self, conductor_id, interfaces):
@@ -1306,22 +1404,23 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def unregister_conductor_hardware_interfaces(self, conductor_id):
- with _session_for_write():
- query = (model_query(models.ConductorHardwareInterfaces)
+ with _session_for_write() as session:
+ query = (session.query(models.ConductorHardwareInterfaces)
.filter_by(conductor_id=conductor_id))
query.delete()
@oslo_db_api.retry_on_deadlock
def touch_node_provisioning(self, node_id):
- with _session_for_write():
- query = model_query(models.Node)
+ with _session_for_write() as session:
+ query = session.query(models.Node)
query = add_identity_filter(query, node_id)
count = query.update({'provision_updated_at': timeutils.utcnow()})
if count == 0:
raise exception.NodeNotFound(node=node_id)
- def _check_node_exists(self, node_id):
- if not model_query(models.Node).filter_by(id=node_id).scalar():
+ def _check_node_exists(self, session, node_id):
+ if not session.query(models.Node).where(
+ models.Node.id == node_id).scalar():
raise exception.NodeNotFound(node=node_id)
@oslo_db_api.retry_on_deadlock
@@ -1340,24 +1439,25 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def unset_node_tags(self, node_id):
- self._check_node_exists(node_id)
- with _session_for_write():
- model_query(models.NodeTag).filter_by(node_id=node_id).delete()
+ with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
+ session.query(models.NodeTag).filter_by(node_id=node_id).delete()
def get_node_tags_by_node_id(self, node_id):
- self._check_node_exists(node_id)
- result = (model_query(models.NodeTag)
- .filter_by(node_id=node_id)
- .all())
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ result = (session.query(models.NodeTag)
+ .filter_by(node_id=node_id)
+ .all())
return result
@oslo_db_api.retry_on_deadlock
def add_node_tag(self, node_id, tag):
- node_tag = models.NodeTag(tag=tag, node_id=node_id)
-
- self._check_node_exists(node_id)
try:
with _session_for_write() as session:
+ node_tag = models.NodeTag(tag=tag, node_id=node_id)
+
+ self._check_node_exists(session, node_id)
session.add(node_tag)
session.flush()
except db_exc.DBDuplicateEntry:
@@ -1368,26 +1468,33 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def delete_node_tag(self, node_id, tag):
- self._check_node_exists(node_id)
- with _session_for_write():
- result = model_query(models.NodeTag).filter_by(
+ with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
+ result = session.query(models.NodeTag).filter_by(
node_id=node_id, tag=tag).delete()
- if not result:
- raise exception.NodeTagNotFound(node_id=node_id, tag=tag)
+ if not result:
+ raise exception.NodeTagNotFound(node_id=node_id, tag=tag)
def node_tag_exists(self, node_id, tag):
- self._check_node_exists(node_id)
- q = model_query(models.NodeTag).filter_by(node_id=node_id, tag=tag)
- return model_query(q.exists()).scalar()
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ q = session.query(models.NodeTag).filter_by(
+ node_id=node_id, tag=tag)
+ return session.query(q.exists()).scalar()
def get_node_by_port_addresses(self, addresses):
- q = _get_node_query_with_all_for_single_node()
+ q = _get_node_select()
q = q.distinct().join(models.Port)
q = q.filter(models.Port.address.in_(addresses))
try:
- return q.one()
+ # FIXME(TheJulia): This needs to be updated to be
+ # an explicit query to identify the node for SQLAlchemy.
+ with _session_for_read() as session:
+ # Always return the first element, since we always
+ # get a tuple from sqlalchemy.
+ return session.execute(q).one()[0]
except NoResultFound:
raise exception.NodeNotFound(
_('Node with port addresses %s was not found')
@@ -1399,7 +1506,7 @@ class Connection(api.Connection):
def get_volume_connector_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, project=None):
- query = model_query(models.VolumeConnector)
+ query = sa.select(models.VolumeConnector)
if project:
query = add_volume_conn_filter_by_node_project(query, project)
return _paginate_query(models.VolumeConnector, limit, marker,
@@ -1423,7 +1530,8 @@ class Connection(api.Connection):
def get_volume_connectors_by_node_id(self, node_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
- query = model_query(models.VolumeConnector).filter_by(node_id=node_id)
+ query = sa.select(models.VolumeConnector).where(
+ models.VolumeConnector.node_id == node_id)
if project:
add_volume_conn_filter_by_node_project(query, project)
return _paginate_query(models.VolumeConnector, limit, marker,
@@ -1457,7 +1565,7 @@ class Connection(api.Connection):
try:
with _session_for_write() as session:
- query = model_query(models.VolumeConnector)
+ query = session.query(models.VolumeConnector)
query = add_identity_filter(query, ident)
ref = query.one()
orig_type = ref['type']
@@ -1475,8 +1583,8 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_volume_connector(self, ident):
- with _session_for_write():
- query = model_query(models.VolumeConnector)
+ with _session_for_write() as session:
+ query = session.query(models.VolumeConnector)
query = add_identity_filter(query, ident)
count = query.delete()
if count == 0:
@@ -1484,14 +1592,15 @@ class Connection(api.Connection):
def get_volume_target_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None, project=None):
- query = model_query(models.VolumeTarget)
+ query = sa.select(models.VolumeTarget)
if project:
query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker,
sort_key, sort_dir, query)
def get_volume_target_by_id(self, db_id):
- query = model_query(models.VolumeTarget).filter_by(id=db_id)
+ query = model_query(models.VolumeTarget).where(
+ models.VolumeTarget.id == db_id)
try:
return query.one()
except NoResultFound:
@@ -1507,7 +1616,8 @@ class Connection(api.Connection):
def get_volume_targets_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None,
project=None):
- query = model_query(models.VolumeTarget).filter_by(node_id=node_id)
+ query = sa.select(models.VolumeTarget).where(
+ models.VolumeTarget.node_id == node_id)
if project:
add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker, sort_key,
@@ -1516,7 +1626,8 @@ class Connection(api.Connection):
def get_volume_targets_by_volume_id(self, volume_id, limit=None,
marker=None, sort_key=None,
sort_dir=None, project=None):
- query = model_query(models.VolumeTarget).filter_by(volume_id=volume_id)
+ query = sa.select(models.VolumeTarget).where(
+ models.VolumeTarget.volume_id == volume_id)
if project:
query = add_volume_target_filter_by_node_project(query, project)
return _paginate_query(models.VolumeTarget, limit, marker, sort_key,
@@ -1549,7 +1660,7 @@ class Connection(api.Connection):
try:
with _session_for_write() as session:
- query = model_query(models.VolumeTarget)
+ query = session.query(models.VolumeTarget)
query = add_identity_filter(query, ident)
ref = query.one()
orig_boot_index = ref['boot_index']
@@ -1564,8 +1675,8 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_volume_target(self, ident):
- with _session_for_write():
- query = model_query(models.VolumeTarget)
+ with _session_for_write() as session:
+ query = session.query(models.VolumeTarget)
query = add_identity_filter(query, ident)
count = query.delete()
if count == 0:
@@ -1585,6 +1696,8 @@ class Connection(api.Connection):
if not versions:
return []
+ if model_name == 'Node':
+ model_name = 'NodeBase'
model = models.get_class(model_name)
# NOTE(rloo): .notin_ does not handle null:
@@ -1613,7 +1726,11 @@ class Connection(api.Connection):
"""
object_versions = release_mappings.get_object_versions()
table_missing_ok = False
- for model in models.Base.__subclasses__():
+ models_to_check = models.Base.__subclasses__()
+ # We need to append Node to the list as it is a subclass of
+ # NodeBase, which is intentional to delineate excess queries.
+ models_to_check.append(models.Node)
+ for model in models_to_check:
if model.__name__ not in object_versions:
continue
@@ -1687,13 +1804,15 @@ class Connection(api.Connection):
mapping = release_mappings.RELEASE_MAPPING['master']['objects']
total_to_migrate = 0
total_migrated = 0
-
- sql_models = [model for model in models.Base.__subclasses__()
+ all_models = models.Base.__subclasses__()
+ all_models.append(models.Node)
+ sql_models = [model for model in all_models
if model.__name__ in mapping]
- for model in sql_models:
- version = mapping[model.__name__][0]
- query = model_query(model).filter(model.version != version)
- total_to_migrate += query.count()
+ with _session_for_read() as session:
+ for model in sql_models:
+ version = mapping[model.__name__][0]
+ query = session.query(model).filter(model.version != version)
+ total_to_migrate += query.count()
if not total_to_migrate:
return total_to_migrate, 0
@@ -1717,8 +1836,8 @@ class Connection(api.Connection):
for model in sql_models:
version = mapping[model.__name__][0]
num_migrated = 0
- with _session_for_write():
- query = model_query(model).filter(model.version != version)
+ with _session_for_write() as session:
+ query = session.query(model).filter(model.version != version)
# NOTE(rloo) Caution here; after doing query.count(), it is
# possible that the value is different in the
# next invocation of the query.
@@ -1730,14 +1849,14 @@ class Connection(api.Connection):
for obj in query.slice(0, max_to_migrate):
ids.append(obj['id'])
num_migrated = (
- model_query(model).
+ session.query(model).
filter(sql.and_(model.id.in_(ids),
model.version != version)).
update({model.version: version},
synchronize_session=False))
else:
num_migrated = (
- model_query(model).
+ session.query(model).
filter(model.version != version).
update({model.version: version},
synchronize_session=False))
@@ -1788,15 +1907,16 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def unset_node_traits(self, node_id):
- self._check_node_exists(node_id)
- with _session_for_write():
- model_query(models.NodeTrait).filter_by(node_id=node_id).delete()
+ with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
+ session.query(models.NodeTrait).filter_by(node_id=node_id).delete()
def get_node_traits_by_node_id(self, node_id):
- self._check_node_exists(node_id)
- result = (model_query(models.NodeTrait)
- .filter_by(node_id=node_id)
- .all())
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ result = (session.query(models.NodeTrait)
+ .filter_by(node_id=node_id)
+ .all())
return result
@oslo_db_api.retry_on_deadlock
@@ -1804,13 +1924,14 @@ class Connection(api.Connection):
node_trait = models.NodeTrait(trait=trait, node_id=node_id,
version=version)
- self._check_node_exists(node_id)
try:
with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
+
session.add(node_trait)
session.flush()
- num_traits = (model_query(models.NodeTrait)
+ num_traits = (session.query(models.NodeTrait)
.filter_by(node_id=node_id).count())
self._verify_max_traits_per_node(node_id, num_traits)
except db_exc.DBDuplicateEntry:
@@ -1821,25 +1942,26 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def delete_node_trait(self, node_id, trait):
- self._check_node_exists(node_id)
- with _session_for_write():
- result = model_query(models.NodeTrait).filter_by(
+ with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
+ result = session.query(models.NodeTrait).filter_by(
node_id=node_id, trait=trait).delete()
- if not result:
- raise exception.NodeTraitNotFound(node_id=node_id, trait=trait)
+ if not result:
+ raise exception.NodeTraitNotFound(node_id=node_id, trait=trait)
def node_trait_exists(self, node_id, trait):
- self._check_node_exists(node_id)
- q = model_query(
- models.NodeTrait).filter_by(node_id=node_id, trait=trait)
- return model_query(q.exists()).scalar()
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ q = session.query(
+ models.NodeTrait).filter_by(node_id=node_id, trait=trait)
+ return session.query(q.exists()).scalar()
@oslo_db_api.retry_on_deadlock
def create_bios_setting_list(self, node_id, settings, version):
- self._check_node_exists(node_id)
bios_settings = []
with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
try:
for setting in settings:
bios_setting = models.BIOSSetting(
@@ -1866,12 +1988,12 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def update_bios_setting_list(self, node_id, settings, version):
- self._check_node_exists(node_id)
bios_settings = []
with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
try:
for setting in settings:
- query = model_query(models.BIOSSetting).filter_by(
+ query = session.query(models.BIOSSetting).filter_by(
node_id=node_id, name=setting['name'])
ref = query.one()
ref.update({'value': setting['value'],
@@ -1897,11 +2019,11 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def delete_bios_setting_list(self, node_id, names):
- self._check_node_exists(node_id)
missing_bios_settings = []
- with _session_for_write():
+ with _session_for_write() as session:
+ self._check_node_exists(session, node_id)
for name in names:
- count = model_query(models.BIOSSetting).filter_by(
+ count = session.query(models.BIOSSetting).filter_by(
node_id=node_id, name=name).delete()
if count == 0:
missing_bios_settings.append(name)
@@ -1910,20 +2032,22 @@ class Connection(api.Connection):
node=node_id, names=','.join(missing_bios_settings))
def get_bios_setting(self, node_id, name):
- self._check_node_exists(node_id)
- query = model_query(models.BIOSSetting).filter_by(
- node_id=node_id, name=name)
- try:
- ref = query.one()
- except NoResultFound:
- raise exception.BIOSSettingNotFound(node=node_id, name=name)
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ query = session.query(models.BIOSSetting).filter_by(
+ node_id=node_id, name=name)
+ try:
+ ref = query.one()
+ except NoResultFound:
+ raise exception.BIOSSettingNotFound(node=node_id, name=name)
return ref
def get_bios_setting_list(self, node_id):
- self._check_node_exists(node_id)
- result = (model_query(models.BIOSSetting)
- .filter_by(node_id=node_id)
- .all())
+ with _session_for_read() as session:
+ self._check_node_exists(session, node_id)
+ result = (session.query(models.BIOSSetting)
+ .filter_by(node_id=node_id)
+ .all())
return result
def get_allocation_by_id(self, allocation_id):
@@ -1933,11 +2057,13 @@ class Connection(api.Connection):
:returns: An allocation.
:raises: AllocationNotFound
"""
- query = model_query(models.Allocation).filter_by(id=allocation_id)
- try:
- return query.one()
- except NoResultFound:
- raise exception.AllocationNotFound(allocation=allocation_id)
+ with _session_for_read() as session:
+ query = session.query(models.Allocation).filter_by(
+ id=allocation_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_id)
def get_allocation_by_uuid(self, allocation_uuid):
"""Return an allocation representation.
@@ -1946,11 +2072,13 @@ class Connection(api.Connection):
:returns: An allocation.
:raises: AllocationNotFound
"""
- query = model_query(models.Allocation).filter_by(uuid=allocation_uuid)
- try:
- return query.one()
- except NoResultFound:
- raise exception.AllocationNotFound(allocation=allocation_uuid)
+ with _session_for_read() as session:
+ query = session.query(models.Allocation).filter_by(
+ uuid=allocation_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=allocation_uuid)
def get_allocation_by_name(self, name):
"""Return an allocation representation.
@@ -1959,11 +2087,12 @@ class Connection(api.Connection):
:returns: An allocation.
:raises: AllocationNotFound
"""
- query = model_query(models.Allocation).filter_by(name=name)
- try:
- return query.one()
- except NoResultFound:
- raise exception.AllocationNotFound(allocation=name)
+ with _session_for_read() as session:
+ query = session.query(models.Allocation).filter_by(name=name)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.AllocationNotFound(allocation=name)
def get_allocation_list(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
@@ -1982,8 +2111,9 @@ class Connection(api.Connection):
(asc, desc)
:returns: A list of allocations.
"""
- query = self._add_allocations_filters(model_query(models.Allocation),
- filters)
+ query = self._add_allocations_filters(
+ sa.select(models.Allocation),
+ filters)
return _paginate_query(models.Allocation, limit, marker,
sort_key, sort_dir, query)
@@ -2040,14 +2170,14 @@ class Connection(api.Connection):
with _session_for_write() as session:
try:
- query = model_query(models.Allocation, session=session)
+ query = session.query(models.Allocation)
query = add_identity_filter(query, allocation_id)
ref = query.one()
ref.update(values)
instance_uuid = ref.uuid
if values.get('node_id') and update_node:
- node = model_query(models.Node, session=session).filter_by(
+ node = session.query(models.Node).filter_by(
id=ref.node_id).with_for_update().one()
node_uuid = node.uuid
if node.instance_uuid and node.instance_uuid != ref.uuid:
@@ -2092,7 +2222,7 @@ class Connection(api.Connection):
"""
with _session_for_write() as session:
try:
- query = model_query(models.Allocation, session=session)
+ query = session.query(models.Allocation)
query = add_identity_filter(query, allocation_id)
# NOTE(dtantsur): the FOR UPDATE clause locks the allocation
ref = query.with_for_update().one()
@@ -2115,7 +2245,7 @@ class Connection(api.Connection):
:raises: AllocationNotFound
"""
with _session_for_write() as session:
- query = model_query(models.Allocation)
+ query = session.query(models.Allocation)
query = add_identity_filter(query, allocation_id)
try:
@@ -2125,7 +2255,7 @@ class Connection(api.Connection):
allocation_id = ref['id']
- node_query = model_query(models.Node, session=session).filter_by(
+ node_query = session.query(models.Node).filter_by(
allocation_id=allocation_id)
node_query.update({'allocation_id': None, 'instance_uuid': None})
@@ -2178,7 +2308,7 @@ class Connection(api.Connection):
return step.interface, step.step, sortable_args, step.priority
# List all existing steps for the template.
- current_steps = (model_query(models.DeployTemplateStep)
+ current_steps = (session.query(models.DeployTemplateStep)
.filter_by(deploy_template_id=template_id))
# List the new steps for the template.
@@ -2202,7 +2332,7 @@ class Connection(api.Connection):
# Delete and create steps in bulk as necessary.
if step_ids_to_delete:
- ((model_query(models.DeployTemplateStep)
+ ((session.query(models.DeployTemplateStep)
.filter(models.DeployTemplateStep.id.in_(step_ids_to_delete)))
.delete(synchronize_session=False))
if steps_to_create:
@@ -2218,70 +2348,81 @@ class Connection(api.Connection):
with _session_for_write() as session:
# NOTE(mgoddard): Don't issue a joined query for the update as
# this does not work with PostgreSQL.
- query = model_query(models.DeployTemplate)
+ query = session.query(models.DeployTemplate)
query = add_identity_filter(query, template_id)
- try:
- ref = query.with_for_update().one()
- except NoResultFound:
- raise exception.DeployTemplateNotFound(
- template=template_id)
-
+ ref = query.with_for_update().one()
# First, update non-step columns.
steps = values.pop('steps', None)
ref.update(values)
-
# If necessary, update steps.
if steps is not None:
self._update_deploy_template_steps(session, ref.id, steps)
+ session.flush()
+ with _session_for_read() as session:
# Return the updated template joined with all relevant fields.
- query = _get_deploy_template_query_with_steps()
+ query = _get_deploy_template_select_with_steps()
query = add_identity_filter(query, template_id)
- return query.one()
+ return session.execute(query).one()[0]
except db_exc.DBDuplicateEntry as e:
if 'name' in e.columns:
raise exception.DeployTemplateDuplicateName(
name=values['name'])
raise
+ except NoResultFound:
+ # TODO(TheJulia): What would unified core raise?!?
+ raise exception.DeployTemplateNotFound(
+ template=template_id)
@oslo_db_api.retry_on_deadlock
def destroy_deploy_template(self, template_id):
- with _session_for_write():
- model_query(models.DeployTemplateStep).filter_by(
+ with _session_for_write() as session:
+ session.query(models.DeployTemplateStep).filter_by(
deploy_template_id=template_id).delete()
- count = model_query(models.DeployTemplate).filter_by(
+ count = session.query(models.DeployTemplate).filter_by(
id=template_id).delete()
if count == 0:
raise exception.DeployTemplateNotFound(template=template_id)
def _get_deploy_template(self, field, value):
"""Helper method for retrieving a deploy template."""
- query = (_get_deploy_template_query_with_steps()
- .filter_by(**{field: value}))
+ query = (_get_deploy_template_select_with_steps()
+ .where(field == value))
try:
- return query.one()
+ # FIXME(TheJulia): This needs to be fixed for SQLAlchemy 2.0
+ with _session_for_read() as session:
+ return session.execute(query).one()[0]
except NoResultFound:
raise exception.DeployTemplateNotFound(template=value)
def get_deploy_template_by_id(self, template_id):
- return self._get_deploy_template('id', template_id)
+ return self._get_deploy_template(models.DeployTemplate.id,
+ template_id)
def get_deploy_template_by_uuid(self, template_uuid):
- return self._get_deploy_template('uuid', template_uuid)
+ return self._get_deploy_template(models.DeployTemplate.uuid,
+ template_uuid)
def get_deploy_template_by_name(self, template_name):
- return self._get_deploy_template('name', template_name)
+ return self._get_deploy_template(models.DeployTemplate.name,
+ template_name)
def get_deploy_template_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
- query = _get_deploy_template_query_with_steps()
+ query = model_query(models.DeployTemplate).options(
+ selectinload(models.DeployTemplate.steps))
return _paginate_query(models.DeployTemplate, limit, marker,
sort_key, sort_dir, query)
def get_deploy_template_list_by_names(self, names):
- query = (_get_deploy_template_query_with_steps()
- .filter(models.DeployTemplate.name.in_(names)))
- return query.all()
+ query = _get_deploy_template_select_with_steps()
+ with _session_for_read() as session:
+ res = session.execute(
+ query.where(
+ models.DeployTemplate.name.in_(names)
+ )
+ ).all()
+ return [r[0] for r in res]
@oslo_db_api.retry_on_deadlock
def create_node_history(self, values):
@@ -2299,8 +2440,8 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_node_history_by_uuid(self, history_uuid):
- with _session_for_write():
- query = model_query(models.NodeHistory).filter_by(
+ with _session_for_write() as session:
+ query = session.query(models.NodeHistory).filter_by(
uuid=history_uuid)
count = query.delete()
if count == 0:
@@ -2328,7 +2469,7 @@ class Connection(api.Connection):
def get_node_history_by_node_id(self, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.NodeHistory)
- query = query.filter_by(node_id=node_id)
+ query = query.where(models.NodeHistory.node_id == node_id)
return _paginate_query(models.NodeHistory, limit, marker,
sort_key, sort_dir, query)
@@ -2395,8 +2536,91 @@ class Connection(api.Connection):
# Uses input entry list, selects entries matching those ids
# then deletes them and does not synchronize the session so
# sqlalchemy doesn't do extra un-necessary work.
+ # NOTE(TheJulia): This is "legacy" syntax, but it is still
+ # valid and under the hood SQLAlchemy rewrites the form into
+ # a delete syntax.
session.query(
models.NodeHistory
).filter(
models.NodeHistory.id.in_(entries)
).delete(synchronize_session=False)
+
+ def count_nodes_in_provision_state(self, state):
+ if not isinstance(state, list):
+ state = [state]
+ with _session_for_read() as session:
+ # Intentionally does not use the full ORM model
+ # because that is de-duped by pkey, but we already
+ # have unique constraints on UUID/name, so... shouldn't
+ # be a big deal. #JuliaFamousLastWords.
+ # Anyway, intent here is to be as quick as possible and
+            # literally have the DB do *all* of the work, so no
+ # client side ops occur. The column is also indexed,
+ # which means this will be an index based response.
+ return session.scalar(
+ sa.select(
+ sa.func.count(models.Node.id)
+ ).filter(
+ or_(
+ models.Node.provision_state == v for v in state
+ )
+ )
+ )
+
+ @oslo_db_api.retry_on_deadlock
+ def create_node_inventory(self, values):
+ inventory = models.NodeInventory()
+ inventory.update(values)
+ with _session_for_write() as session:
+ try:
+ session.add(inventory)
+ session.flush()
+ except db_exc.DBDuplicateEntry:
+ raise exception.NodeInventoryAlreadyExists(
+ id=values['id'])
+ return inventory
+
+ @oslo_db_api.retry_on_deadlock
+ def destroy_node_inventory_by_node_id(self, node_id):
+ with _session_for_write() as session:
+ query = session.query(models.NodeInventory).filter_by(
+ node_id=node_id)
+ count = query.delete()
+ if count == 0:
+ raise exception.NodeInventoryNotFound(
+ node=node_id)
+
+ def get_node_inventory_by_node_id(self, node_id):
+ query = model_query(models.NodeInventory).filter_by(node_id=node_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.NodeInventoryNotFound(node=node_id)
+
+ def get_shard_list(self):
+ """Return a list of shards.
+
+ :returns: A list of dicts containing the keys name and count.
+ """
+ # Note(JayF): This should never be a large enough list to require
+ # pagination. Furthermore, it wouldn't really be a sensible
+ # thing to paginate as the data it's fetching can mutate.
+ # So we just aren't even going to try.
+ shard_list = []
+ with _session_for_read() as session:
+ res = session.execute(
+ # Note(JayF): SQLAlchemy counts are notoriously slow because
+ # sometimes they will use a subquery. Be careful
+ # before changing this to use any magic.
+ sa.text(
+ "SELECT count(id), shard from nodes group by shard;"
+ )).fetchall()
+
+ if res:
+ res.sort(key=lambda x: x[0], reverse=True)
+ for shard in res:
+ shard_list.append(
+ {"name": str(shard[1]), "count": shard[0]}
+ )
+
+ return shard_list
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index 8f3f6a564..342491417 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -19,16 +19,18 @@ SQLAlchemy models for baremetal data.
"""
from os import path
+from typing import List
from urllib import parse as urlparse
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import types as db_types
+from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy import Boolean, Column, DateTime, false, Index
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
-from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
+from sqlalchemy.orm import declarative_base
from ironic.common import exception
from ironic.common.i18n import _
@@ -116,8 +118,8 @@ class ConductorHardwareInterfaces(Base):
default = Column(Boolean, default=False, nullable=False)
-class Node(Base):
- """Represents a bare metal node."""
+class NodeBase(Base):
+ """Represents a base bare metal node."""
__tablename__ = 'nodes'
__table_args__ = (
@@ -132,6 +134,7 @@ class Node(Base):
Index('reservation_idx', 'reservation'),
Index('conductor_group_idx', 'conductor_group'),
Index('resource_class_idx', 'resource_class'),
+ Index('shard_idx', 'shard'),
table_args())
id = Column(Integer, primary_key=True)
uuid = Column(String(36))
@@ -212,6 +215,34 @@ class Node(Base):
boot_mode = Column(String(16), nullable=True)
secure_boot = Column(Boolean, nullable=True)
+ shard = Column(String(255), nullable=True)
+
+
+class Node(NodeBase):
+ """Represents a bare metal node."""
+
+ # NOTE(TheJulia): The purpose of the delineation between NodeBase and Node
+ # is to facilitate a hard delineation for queries where we do not need to
+ # populate additional information needlessly which would normally populate
+ # from the access of the property. In this case, Traits and Tags.
+ # The other reason we do this, is because these are generally "joined"
+ # data structures, we cannot de-duplicate node objects with unhashable dict
+ # data structures.
+
+ # NOTE(TheJulia): The choice of selectin lazy population is intentional
+ # as it causes a subselect to occur, skipping the need for deduplication
+ # in general. This puts a slightly higher query load on the DB server, but
+ # means *far* less gets shipped over the wire in the end.
+ traits: orm.Mapped[List['NodeTrait']] = orm.relationship( # noqa
+ "NodeTrait",
+ back_populates="node",
+ lazy="selectin")
+
+ tags: orm.Mapped[List['NodeTag']] = orm.relationship( # noqa
+ "NodeTag",
+ back_populates="node",
+ lazy="selectin")
+
class Port(Base):
"""Represents a network port of a bare metal node."""
@@ -235,6 +266,15 @@ class Port(Base):
is_smartnic = Column(Boolean, nullable=True, default=False)
name = Column(String(255), nullable=True)
+ _node_uuid = orm.relationship(
+ "Node",
+ viewonly=True,
+ primaryjoin="(Node.id == Port.node_id)",
+ lazy="selectin",
+ )
+ node_uuid = association_proxy(
+ "_node_uuid", "uuid", creator=lambda _i: Node(uuid=_i))
+
class Portgroup(Base):
"""Represents a group of network ports of a bare metal node."""
@@ -256,6 +296,15 @@ class Portgroup(Base):
mode = Column(String(255))
properties = Column(db_types.JsonEncodedDict)
+ _node_uuid = orm.relationship(
+ "Node",
+ viewonly=True,
+ primaryjoin="(Node.id == Portgroup.node_id)",
+ lazy="selectin",
+ )
+ node_uuid = association_proxy(
+ "_node_uuid", "uuid", creator=lambda _i: Node(uuid=_i))
+
class NodeTag(Base):
"""Represents a tag of a bare metal node."""
@@ -270,7 +319,6 @@ class NodeTag(Base):
node = orm.relationship(
"Node",
- backref='tags',
primaryjoin='and_(NodeTag.node_id == Node.id)',
foreign_keys=node_id
)
@@ -327,7 +375,6 @@ class NodeTrait(Base):
trait = Column(String(255), primary_key=True, nullable=False)
node = orm.relationship(
"Node",
- backref='traits',
primaryjoin='and_(NodeTrait.node_id == Node.id)',
foreign_keys=node_id
)
@@ -389,6 +436,10 @@ class DeployTemplate(Base):
uuid = Column(String(36))
name = Column(String(255), nullable=False)
extra = Column(db_types.JsonEncodedDict)
+ steps: orm.Mapped[List['DeployTemplateStep']] = orm.relationship( # noqa
+ "DeployTemplateStep",
+ back_populates="deploy_template",
+ lazy="selectin")
class DeployTemplateStep(Base):
@@ -409,7 +460,6 @@ class DeployTemplateStep(Base):
priority = Column(Integer, nullable=False)
deploy_template = orm.relationship(
"DeployTemplate",
- backref='steps',
primaryjoin=(
'and_(DeployTemplateStep.deploy_template_id == '
'DeployTemplate.id)'),
@@ -437,6 +487,18 @@ class NodeHistory(Base):
node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
+class NodeInventory(Base):
+ """Represents an inventory of a baremetal node."""
+ __tablename__ = 'node_inventory'
+ __table_args__ = (
+ Index('inventory_node_id_idx', 'node_id'),
+ table_args())
+ id = Column(Integer, primary_key=True)
+ inventory_data = Column(db_types.JsonEncodedDict(mysql_as_long=True))
+ plugin_data = Column(db_types.JsonEncodedDict(mysql_as_long=True))
+ node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
+
+
def get_class(model_name):
"""Returns the model class with the specified name.
diff --git a/ironic/dhcp/base.py b/ironic/dhcp/base.py
index 57a4e7911..b2b711307 100644
--- a/ironic/dhcp/base.py
+++ b/ironic/dhcp/base.py
@@ -102,3 +102,14 @@ class BaseDHCP(object, metaclass=abc.ABCMeta):
:raises: FailedToCleanDHCPOpts
"""
pass
+
+ def supports_ipxe_tag(self):
+ """Whether the provider will correctly apply the 'ipxe' tag.
+
+ When iPXE makes a DHCP request, does this provider support adding
+ the tag `ipxe` or `ipxe6` (for IPv6). When the provider returns True,
+ options can be added which filter on these tags.
+
+ :returns: True when the driver supports tagging iPXE DHCP requests
+ """
+ return False
diff --git a/ironic/dhcp/dnsmasq.py b/ironic/dhcp/dnsmasq.py
new file mode 100644
index 000000000..c6f27afe4
--- /dev/null
+++ b/ironic/dhcp/dnsmasq.py
@@ -0,0 +1,159 @@
+#
+# Copyright 2022 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
+from ironic.conf import CONF
+from ironic.dhcp import base
+
+LOG = logging.getLogger(__name__)
+
+
+class DnsmasqDHCPApi(base.BaseDHCP):
+ """API for managing host specific Dnsmasq configuration."""
+
+ def update_port_dhcp_opts(self, port_id, dhcp_options, token=None,
+ context=None):
+ pass
+
+ def update_dhcp_opts(self, task, options, vifs=None):
+ """Send or update the DHCP BOOT options for this node.
+
+ :param task: A TaskManager instance.
+ :param options: this will be a list of dicts, e.g.
+
+ ::
+
+ [{'opt_name': '67',
+ 'opt_value': 'pxelinux.0',
+ 'ip_version': 4},
+ {'opt_name': '66',
+ 'opt_value': '123.123.123.456',
+ 'ip_version': 4}]
+ :param vifs: Ignored argument
+ """
+ node = task.node
+ macs = set(self._pxe_enabled_macs(task.ports))
+
+ opt_file = self._opt_file_path(node)
+ tag = node.driver_internal_info.get('dnsmasq_tag')
+ if not tag:
+ tag = uuidutils.generate_uuid()
+ node.set_driver_internal_info('dnsmasq_tag', tag)
+ node.save()
+
+ LOG.debug('Writing to %s:', opt_file)
+ with open(opt_file, 'w') as f:
+ # Apply each option by tag
+ for option in options:
+ entry = 'tag:{tag},{opt_name},{opt_value}\n'.format(
+ tag=tag,
+ opt_name=option.get('opt_name'),
+ opt_value=option.get('opt_value'),
+ )
+ LOG.debug(entry)
+ f.write(entry)
+
+ for mac in macs:
+ host_file = self._host_file_path(mac)
+ LOG.debug('Writing to %s:', host_file)
+ with open(host_file, 'w') as f:
+ # Tag each address with the unique uuid scoped to
+ # this node and DHCP transaction
+ entry = '{mac},set:{tag},set:ironic\n'.format(
+ mac=mac, tag=tag)
+ LOG.debug(entry)
+ f.write(entry)
+
+ def _opt_file_path(self, node):
+ return os.path.join(CONF.dnsmasq.dhcp_optsdir,
+ 'ironic-{}.conf'.format(node.uuid))
+
+ def _host_file_path(self, mac):
+ return os.path.join(CONF.dnsmasq.dhcp_hostsdir,
+ 'ironic-{}.conf'.format(mac))
+
+ def _pxe_enabled_macs(self, ports):
+ for port in ports:
+ if port.pxe_enabled:
+ yield port.address
+
+ def get_ip_addresses(self, task):
+ """Get IP addresses for all ports/portgroups in `task`.
+
+ :param task: a TaskManager instance.
+ :returns: List of IP addresses associated with
+ task's ports/portgroups.
+ """
+ lease_path = CONF.dnsmasq.dhcp_leasefile
+ macs = set(self._pxe_enabled_macs(task.ports))
+ addresses = []
+ with open(lease_path, 'r') as f:
+ for line in f.readlines():
+ lease = line.split()
+ if lease[1] in macs:
+ addresses.append(lease[2])
+ LOG.debug('Found addresses for %s: %s',
+ task.node.uuid, ', '.join(addresses))
+ return addresses
+
+ def clean_dhcp_opts(self, task):
+ """Clean up the DHCP BOOT options for the host in `task`.
+
+ :param task: A TaskManager instance.
+
+ :raises: FailedToCleanDHCPOpts
+ """
+
+ node = task.node
+ # Discard this unique tag
+ node.del_driver_internal_info('dnsmasq_tag')
+ node.save()
+
+ # Changing the host rule to ignore will be picked up by dnsmasq
+ # without requiring a SIGHUP. When the mac address is active again
+ # this file will be replaced with one that applies a new unique tag.
+ macs = set(self._pxe_enabled_macs(task.ports))
+ for mac in macs:
+ host_file = self._host_file_path(mac)
+ with open(host_file, 'w') as f:
+ entry = '{mac},ignore\n'.format(mac=mac)
+ f.write(entry)
+
+ # Deleting the file containing dhcp-option won't remove the rules from
+ # dnsmasq but no requests will be tagged with the dnsmasq_tag uuid so
+ # these rules will not apply.
+ opt_file = self._opt_file_path(node)
+ if os.path.exists(opt_file):
+ os.remove(opt_file)
+
+ def supports_ipxe_tag(self):
+ """Whether the provider will correctly apply the 'ipxe' tag.
+
+ When iPXE makes a DHCP request, does this provider support adding
+ the tag `ipxe` or `ipxe6` (for IPv6). When the provider returns True,
+ options can be added which filter on these tags.
+
+ The `dnsmasq` provider sets this to True on the assumption that the
+ following is included in the dnsmasq.conf:
+
+ dhcp-match=set:ipxe,175
+
+ :returns: True
+ """
+ return True
diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py
index a5cb09282..06962ad42 100644
--- a/ironic/dhcp/neutron.py
+++ b/ironic/dhcp/neutron.py
@@ -278,3 +278,14 @@ class NeutronDHCPApi(base.BaseDHCP):
task, task.portgroups, client)
return port_ip_addresses + portgroup_ip_addresses
+
+ def supports_ipxe_tag(self):
+ """Whether the provider will correctly apply the 'ipxe' tag.
+
+ When iPXE makes a DHCP request, does this provider support adding
+ the tag `ipxe` or `ipxe6` (for IPv6). When the provider returns True,
+ options can be added which filter on these tags.
+
+ :returns: True
+ """
+ return True
diff --git a/ironic/drivers/ilo.py b/ironic/drivers/ilo.py
index 10676b411..b6e189ee9 100644
--- a/ironic/drivers/ilo.py
+++ b/ironic/drivers/ilo.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -37,7 +38,7 @@ class IloHardware(generic.GenericHardware):
@property
def supported_boot_interfaces(self):
"""List of supported boot interfaces."""
- return [boot.IloVirtualMediaBoot, boot.IloPXEBoot, boot.IloiPXEBoot]
+ return [boot.IloVirtualMediaBoot, boot.IloiPXEBoot, boot.IloPXEBoot]
@property
def supported_bios_interfaces(self):
@@ -67,7 +68,7 @@ class IloHardware(generic.GenericHardware):
@property
def supported_vendor_interfaces(self):
- """List of supported power interfaces."""
+ """List of supported vendor interfaces."""
return [vendor.VendorPassthru, noop.NoVendor]
diff --git a/ironic/drivers/irmc.py b/ironic/drivers/irmc.py
index f3c2d7c65..06408359b 100644
--- a/ironic/drivers/irmc.py
+++ b/ironic/drivers/irmc.py
@@ -27,6 +27,7 @@ from ironic.drivers.modules.irmc import inspect
from ironic.drivers.modules.irmc import management
from ironic.drivers.modules.irmc import power
from ironic.drivers.modules.irmc import raid
+from ironic.drivers.modules.irmc import vendor
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
@@ -48,8 +49,8 @@ class IRMCHardware(generic.GenericHardware):
"""List of supported boot interfaces."""
# NOTE: Support for pxe boot is deprecated, and will be
# removed from the list in the future.
- return [boot.IRMCVirtualMediaBoot, boot.IRMCPXEBoot,
- ipxe.iPXEBoot, pxe.PXEBoot]
+ return [boot.IRMCVirtualMediaBoot, ipxe.iPXEBoot,
+ boot.IRMCPXEBoot, pxe.PXEBoot]
@property
def supported_console_interfaces(self):
@@ -77,3 +78,8 @@ class IRMCHardware(generic.GenericHardware):
def supported_raid_interfaces(self):
"""List of supported raid interfaces."""
return [noop.NoRAID, raid.IRMCRAID, agent.AgentRAID]
+
+ @property
+ def supported_vendor_interfaces(self):
+ """List of supported vendor interfaces."""
+ return [noop.NoVendor, vendor.IRMCVendorPassthru]
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index ff2a454ea..7732840d5 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -100,7 +100,7 @@ _FASTTRACK_HEARTBEAT_ALLOWED = (states.DEPLOYWAIT, states.CLEANWAIT,
FASTTRACK_HEARTBEAT_ALLOWED = frozenset(_FASTTRACK_HEARTBEAT_ALLOWED)
-@METRICS.timer('post_clean_step_hook')
+@METRICS.timer('AgentBase.post_clean_step_hook')
def post_clean_step_hook(interface, step):
"""Decorator method for adding a post clean step hook.
@@ -128,7 +128,7 @@ def post_clean_step_hook(interface, step):
return decorator
-@METRICS.timer('post_deploy_step_hook')
+@METRICS.timer('AgentBase.post_deploy_step_hook')
def post_deploy_step_hook(interface, step):
"""Decorator method for adding a post deploy step hook.
@@ -279,7 +279,7 @@ def _get_completed_command(task, commands, step_type):
return last_command
-@METRICS.timer('log_and_raise_deployment_error')
+@METRICS.timer('AgentBase.log_and_raise_deployment_error')
def log_and_raise_deployment_error(task, msg, collect_logs=True, exc=None):
"""Helper method to log the error and raise exception.
diff --git a/ironic/drivers/modules/boot_mode_utils.py b/ironic/drivers/modules/boot_mode_utils.py
index bb3ccc3a4..e48cea32d 100644
--- a/ironic/drivers/modules/boot_mode_utils.py
+++ b/ironic/drivers/modules/boot_mode_utils.py
@@ -137,7 +137,7 @@ def sync_boot_mode(task):
elif ironic_boot_mode != bm_boot_mode:
msg = (_("Boot mode %(node_boot_mode)s currently configured "
"on node %(uuid)s does not match the boot mode "
- "%(ironic_boot_mode)s requested for provisioning."
+ "%(ironic_boot_mode)s requested for provisioning. "
"Attempting to set node boot mode to %(ironic_boot_mode)s.") %
{'uuid': node.uuid, 'node_boot_mode': bm_boot_mode,
'ironic_boot_mode': ironic_boot_mode})
diff --git a/ironic/drivers/modules/console_utils.py b/ironic/drivers/modules/console_utils.py
index 6e08b6712..c5e9e857a 100644
--- a/ironic/drivers/modules/console_utils.py
+++ b/ironic/drivers/modules/console_utils.py
@@ -90,7 +90,7 @@ def _get_console_pid(node_uuid):
with open(pid_path, 'r') as f:
pid_str = f.readline()
return int(pid_str)
- except (IOError, ValueError):
+ except (IOError, ValueError, FileNotFoundError):
raise exception.NoConsolePid(pid_path=pid_path)
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index ae06f0dfa..8bad02bba 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -1327,6 +1327,8 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
"""Perform post delete_configuration action to commit the config.
Clears foreign configuration for all RAID controllers.
+        If there is no foreign configuration to clear, then checks whether
+        any controllers can be converted to RAID mode.
:param task: a TaskManager instance containing the node to act on.
:param raid_configs: a list of dictionaries containing the RAID
@@ -1338,7 +1340,15 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
async_proc = DracRedfishRAID._clear_foreign_config(system, task)
if async_proc:
# Async processing with system rebooting in progress
+ task.node.set_driver_internal_info(
+ 'raid_config_substep', 'clear_foreign_config')
+ task.node.save()
return deploy_utils.get_async_step_return_state(task.node)
+ else:
+ conv_state = DracRedfishRAID._convert_controller_to_raid_mode(
+ task)
+ if conv_state:
+ return conv_state
return return_state
@@ -1486,6 +1496,69 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
task_mon.wait(CONF.drac.raid_job_timeout)
return False
+ @staticmethod
+ def _convert_controller_to_raid_mode(task):
+ """Convert eligible controllers to RAID mode if not already.
+
+ :param task: a TaskManager instance containing the node to act on
+        :returns: Return state if there are controllers to convert and
+            rebooting is in progress, otherwise None.
+ """
+
+ system = redfish_utils.get_system(task.node)
+ task_mons = []
+ warning_msg_templ = (
+ 'Possibly because `%(pkg)s` is too old. Without newer `%(pkg)s` '
+ 'PERC 9 and PERC 10 controllers that are not in RAID mode will '
+ 'not be used or have limited RAID support. To avoid that update '
+ '`%(pkg)s`')
+ for storage in system.storage.get_members():
+ storage_controllers = None
+ try:
+ storage_controllers = storage.controllers
+ except sushy.exceptions.MissingAttributeError:
+ # Check if there storage_controllers to separate old iDRAC and
+ # storage without controller
+ if storage.storage_controllers:
+ LOG.warning('%(storage)s does not have controllers for '
+ 'node %(node)s' + warning_msg_templ,
+ {'storage': storage.identity,
+ 'node': task.node.uuid,
+ 'pkg': 'iDRAC'})
+ continue
+ except AttributeError:
+ LOG.warning('%(storage)s does not have controllers attribute. '
+ + warning_msg_templ, {'storage': storage.identity,
+ 'pkg': 'sushy'})
+ return None
+ if storage_controllers:
+ controller = storage.controllers.get_members()[0]
+ try:
+ oem_controller = controller.get_oem_extension('Dell')
+ except sushy.exceptions.ExtensionError as ee:
+ LOG.warning('Failed to find extension to convert '
+ 'controller to RAID mode. '
+ + warning_msg_templ + '. Error: %(err)s',
+ {'err': ee, 'pkg': 'sushy-oem-idrac'})
+ return None
+ task_mon = oem_controller.convert_to_raid()
+ if task_mon:
+ task_mons.append(task_mon)
+
+ if task_mons:
+ deploy_utils.set_async_step_flags(
+ task.node,
+ reboot=True,
+ skip_current_step=True,
+ polling=True)
+
+ task.upgrade_lock()
+ task.node.set_driver_internal_info(
+ 'raid_task_monitor_uris',
+ [tm.task_monitor_uri for tm in task_mons])
+ task.node.save()
+ return deploy_utils.reboot_to_finish_step(task)
+
@METRICS.timer('DracRedfishRAID._query_raid_tasks_status')
@periodics.node_periodic(
purpose='checking async RAID tasks',
@@ -1545,6 +1618,15 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
else:
# all tasks completed and none of them failed
node.del_driver_internal_info('raid_task_monitor_uris')
+ substep = node.driver_internal_info.get(
+ 'raid_config_substep')
+ if substep == 'clear_foreign_config':
+ node.del_driver_internal_info('raid_config_substep')
+ node.save()
+ res = DracRedfishRAID._convert_controller_to_raid_mode(
+ task)
+ if res: # New tasks submitted
+ return
self._set_success(task)
node.save()
diff --git a/ironic/drivers/modules/fake.py b/ironic/drivers/modules/fake.py
index dffd9065d..0a26efb4c 100644
--- a/ironic/drivers/modules/fake.py
+++ b/ironic/drivers/modules/fake.py
@@ -24,6 +24,9 @@ functionality between a power interface and a deploy interface, when both rely
on separate vendor_passthru methods.
"""
+import random
+import time
+
from oslo_log import log
from ironic.common import boot_devices
@@ -32,6 +35,7 @@ from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import indicator_states
from ironic.common import states
+from ironic.conf import CONF
from ironic.drivers import base
from ironic import objects
@@ -39,6 +43,34 @@ from ironic import objects
LOG = log.getLogger(__name__)
+def parse_sleep_range(sleep_range):
+ if not sleep_range:
+ return 0, 0
+
+ sleep_split = sleep_range.split(',')
+ if len(sleep_split) == 1:
+ a = sleep_split[0]
+ b = sleep_split[0]
+ else:
+ a = sleep_split[0]
+ b = sleep_split[1]
+ return int(a), int(b)
+
+
+def sleep(sleep_range):
+ earliest, latest = parse_sleep_range(sleep_range)
+ if earliest == 0 and latest == 0:
+ # no sleep
+ return
+ if earliest == latest:
+ # constant sleep
+ sleep = earliest
+ else:
+ # triangular random sleep, weighted towards the earliest
+ sleep = random.triangular(earliest, latest, earliest)
+ time.sleep(sleep)
+
+
class FakePower(base.PowerInterface):
"""Example implementation of a simple power interface."""
@@ -49,12 +81,15 @@ class FakePower(base.PowerInterface):
pass
def get_power_state(self, task):
+ sleep(CONF.fake.power_delay)
return task.node.power_state
def reboot(self, task, timeout=None):
+ sleep(CONF.fake.power_delay)
pass
def set_power_state(self, task, power_state, timeout=None):
+ sleep(CONF.fake.power_delay)
if power_state not in [states.POWER_ON, states.POWER_OFF,
states.SOFT_REBOOT, states.SOFT_POWER_OFF]:
raise exception.InvalidParameterValue(
@@ -81,15 +116,19 @@ class FakeBoot(base.BootInterface):
pass
def prepare_ramdisk(self, task, ramdisk_params, mode='deploy'):
+ sleep(CONF.fake.boot_delay)
pass
def clean_up_ramdisk(self, task, mode='deploy'):
+ sleep(CONF.fake.boot_delay)
pass
def prepare_instance(self, task):
+ sleep(CONF.fake.boot_delay)
pass
def clean_up_instance(self, task):
+ sleep(CONF.fake.boot_delay)
pass
@@ -108,18 +147,23 @@ class FakeDeploy(base.DeployInterface):
@base.deploy_step(priority=100)
def deploy(self, task):
+ sleep(CONF.fake.deploy_delay)
return None
def tear_down(self, task):
+ sleep(CONF.fake.deploy_delay)
return states.DELETED
def prepare(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
def clean_up(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
def take_over(self, task):
+ sleep(CONF.fake.deploy_delay)
pass
@@ -140,6 +184,7 @@ class FakeVendorA(base.VendorInterface):
@base.passthru(['POST'],
description=_("Test if the value of bar is baz"))
def first_method(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'baz' else False
@@ -161,16 +206,19 @@ class FakeVendorB(base.VendorInterface):
@base.passthru(['POST'],
description=_("Test if the value of bar is kazoo"))
def second_method(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'kazoo' else False
@base.passthru(['POST'], async_call=False,
description=_("Test if the value of bar is meow"))
def third_method_sync(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'meow' else False
@base.passthru(['POST'], require_exclusive_lock=False,
description=_("Test if the value of bar is woof"))
def fourth_method_shared_lock(self, task, http_method, bar):
+ sleep(CONF.fake.vendor_delay)
return True if bar == 'woof' else False
@@ -211,17 +259,21 @@ class FakeManagement(base.ManagementInterface):
return [boot_devices.PXE]
def set_boot_device(self, task, device, persistent=False):
+ sleep(CONF.fake.management_delay)
if device not in self.get_supported_boot_devices(task):
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
def get_boot_device(self, task):
+ sleep(CONF.fake.management_delay)
return {'boot_device': boot_devices.PXE, 'persistent': False}
def get_sensors_data(self, task):
+ sleep(CONF.fake.management_delay)
return {}
def get_supported_indicators(self, task, component=None):
+ sleep(CONF.fake.management_delay)
indicators = {
components.CHASSIS: {
'led-0': {
@@ -248,6 +300,7 @@ class FakeManagement(base.ManagementInterface):
if not component or component == c}
def get_indicator_state(self, task, component, indicator):
+ sleep(CONF.fake.management_delay)
indicators = self.get_supported_indicators(task)
if component not in indicators:
raise exception.InvalidParameterValue(_(
@@ -271,6 +324,7 @@ class FakeInspect(base.InspectInterface):
pass
def inspect_hardware(self, task):
+ sleep(CONF.fake.inspect_delay)
return states.MANAGEABLE
@@ -282,9 +336,11 @@ class FakeRAID(base.RAIDInterface):
def create_configuration(self, task, create_root_volume=True,
create_nonroot_volumes=True):
+ sleep(CONF.fake.raid_delay)
pass
def delete_configuration(self, task):
+ sleep(CONF.fake.raid_delay)
pass
@@ -302,6 +358,7 @@ class FakeBIOS(base.BIOSInterface):
'to contain a dictionary with name/value pairs'),
'required': True}})
def apply_configuration(self, task, settings):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of apply_configuration in fake interface
# is just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -328,6 +385,7 @@ class FakeBIOS(base.BIOSInterface):
@base.clean_step(priority=0)
def factory_reset(self, task):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of factory_reset in fake interface is
# just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -340,6 +398,7 @@ class FakeBIOS(base.BIOSInterface):
@base.clean_step(priority=0)
def cache_bios_settings(self, task):
+ sleep(CONF.fake.bios_delay)
# Note: the implementation of cache_bios_settings in fake interface
# is just for testing purpose, for real driver implementation, please
# refer to develop doc at https://docs.openstack.org/ironic/latest/
@@ -357,9 +416,11 @@ class FakeStorage(base.StorageInterface):
return {}
def attach_volumes(self, task):
+ sleep(CONF.fake.storage_delay)
pass
def detach_volumes(self, task):
+ sleep(CONF.fake.storage_delay)
pass
def should_write_image(self, task):
@@ -376,7 +437,9 @@ class FakeRescue(base.RescueInterface):
pass
def rescue(self, task):
+ sleep(CONF.fake.rescue_delay)
return states.RESCUE
def unrescue(self, task):
+ sleep(CONF.fake.rescue_delay)
return states.ACTIVE
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index 2b5b8c0db..13f975c67 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -31,6 +32,7 @@ from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
+from ironic.common import image_service
from ironic.common import images
from ironic.common import swift
from ironic.common import utils
@@ -494,6 +496,26 @@ def update_ipmi_properties(task):
task.node.driver_info = info
def update_redfish_properties(task):
    """Populate Redfish connection fields in the node's driver_info.

    Copies the iLO address/credentials already present in driver_info
    into the corresponding ``redfish_*`` keys so that Redfish-based
    code can talk to the same BMC.

    :param task: a task from TaskManager.
    """
    driver_info = task.node.driver_info

    # Mirror the iLO connection settings under the redfish_* keys.
    for redfish_key, ilo_key in [('redfish_address', 'ilo_address'),
                                 ('redfish_username', 'ilo_username'),
                                 ('redfish_password', 'ilo_password'),
                                 ('redfish_verify_ca', 'ilo_verify_ca')]:
        driver_info[redfish_key] = driver_info.get(ilo_key)

    # Hard-coded system path — assumes the managed system is always
    # exposed at /redfish/v1/Systems/1 on iLO BMCs.
    driver_info['redfish_system_id'] = '/redfish/v1/Systems/1'

    # Re-assign so the change is recorded on the node object.
    task.node.driver_info = driver_info
+
+
def _get_floppy_image_name(node):
"""Returns the floppy image name for a given node.
@@ -1126,3 +1148,23 @@ def setup_uefi_https(task, iso, persistent=False):
except ilo_error.IloError as ilo_exception:
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
+
+
def download(target_file, file_url):
    """Downloads file based on the scheme.

    It downloads the file (url) to the given location.
    The supported url schemes are file, http, and https.

    :param target_file: target file for copying the downloaded file.
    :param file_url: source file url from where file needs to be downloaded.
    :raises: ImageDownloadFailed, on failure to download the file or if
        the URL scheme is not supported.
    """
    parsed_url = urlparse.urlparse(file_url)
    if parsed_url.scheme == "file":
        with open(target_file, 'wb') as fd:
            image_service.FileImageService().download(parsed_url.path, fd)
    elif parsed_url.scheme in ('http', 'https'):
        with open(target_file, 'wb') as fd:
            image_service.HttpImageService().download(parsed_url.geturl(), fd)
    else:
        # Previously an unsupported scheme silently produced no file at
        # all, despite the documented contract; fail loudly instead.
        raise exception.ImageDownloadFailed(
            image_href=file_url,
            reason=_("Unsupported URL scheme: %s") % parsed_url.scheme)
diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py
index c9a8259e6..5c4f03fb6 100644
--- a/ironic/drivers/modules/ilo/management.py
+++ b/ironic/drivers/modules/ilo/management.py
@@ -14,7 +14,8 @@
"""
iLO Management Interface
"""
-
+import os
+import shutil
from urllib import parse as urlparse
from ironic_lib import metrics_utils
@@ -79,6 +80,27 @@ _RESET_ILO_CREDENTIALS_ARGSINFO = {
}
}
+_CREATE_CSR_ARGSINFO = {
+ 'csr_params': {
+ 'description': (
+ "This arguments represents the information needed "
+ "to create the CSR certificate. The keys to be provided are "
+ "City, CommonName, OrgName, State."
+ ),
+ 'required': True
+ }
+}
+
+_ADD_HTTPS_CERT_ARGSINFO = {
+ 'cert_file': {
+ 'description': (
+ "This argument represents the path to the signed HTTPS "
+ "certificate which will be added to the iLO."
+ ),
+ 'required': True
+ }
+}
+
_SECURITY_PARAMETER_UPDATE_ARGSINFO = {
'security_parameters': {
'description': (
@@ -574,6 +596,61 @@ class IloManagement(base.ManagementInterface):
"parameter for node %(node)s is updated",
{'node': node.uuid})
    @METRICS.timer('IloManagement.create_csr')
    @base.clean_step(priority=0, abortable=False,
                     argsinfo=_CREATE_CSR_ARGSINFO)
    def create_csr(self, task, **kwargs):
        """Creates the CSR.

        The CSR is generated under ``[ilo]cert_path/<node uuid>`` on the
        conductor; the directory is created (mode 0755) if missing.

        :param task: a TaskManager object.
        :param kwargs: must contain ``csr_params``, a dict with the
            information needed to create the CSR (City, CommonName,
            OrgName, State — see _CREATE_CSR_ARGSINFO).
        """
        node = task.node
        csr_params = kwargs.get('csr_params')
        csr_path = CONF.ilo.cert_path
        path = os.path.join(csr_path, task.node.uuid)
        if not os.path.exists(path):
            os.makedirs(path, 0o755)

        LOG.debug("Creating CSR for node %(node)s ..",
                  {'node': node.uuid})
        _execute_ilo_step(node, 'create_csr', path, csr_params)
        LOG.info("Creation of CSR for node %(node)s is "
                 "completed.", {'node': node.uuid})
+
    @METRICS.timer('IloManagement.add_https_certificate')
    @base.clean_step(priority=0, abortable=False,
                     argsinfo=_ADD_HTTPS_CERT_ARGSINFO)
    def add_https_certificate(self, task, **kwargs):
        """Adds the signed HTTPS certificate to the iLO.

        The certificate referenced by ``cert_file`` is first copied (when
        it is a plain local path) or downloaded (file/http/https URL) to
        ``[ilo]cert_path/<node uuid>/<node uuid>.crt`` and then uploaded
        to the iLO.

        :param task: a TaskManager object.
        :param kwargs: must contain ``cert_file``, a local path or URL to
            the signed HTTPS certificate.
        :raises: IloOperationNotSupported if the URL scheme of
            ``cert_file`` is not one of file, http or https.
        """
        node = task.node
        csr_path = CONF.ilo.cert_path
        path = os.path.join(csr_path, task.node.uuid)
        if not os.path.exists(path):
            os.makedirs(path, 0o755)
        cert_file_name = node.uuid + ".crt"
        cert_file_path = os.path.join(path, cert_file_name)
        cert_file = kwargs.get('cert_file')
        url_scheme = urlparse.urlparse(cert_file).scheme
        if url_scheme == '':
            # No scheme: treat as a plain filesystem path and copy it.
            shutil.copy(cert_file, cert_file_path)
        elif url_scheme in ('http', 'https', 'file'):
            ilo_common.download(cert_file_path, cert_file)
        else:
            msg = (_("The url scheme %(scheme)s not supported with clean step "
                     "%(step)s") % {'scheme': url_scheme,
                                    'step': 'add_https_certificate'})
            raise exception.IloOperationNotSupported(operation='clean step',
                                                     error=msg)

        LOG.debug("Adding the signed HTTPS certificate to the "
                  "node %(node)s ..", {'node': node.uuid})
        _execute_ilo_step(node, 'add_https_certificate', cert_file_path)
        LOG.info("Adding of HTTPS certificate to the node %(node)s "
                 "is completed.", {'node': node.uuid})
+
@METRICS.timer('IloManagement.update_firmware')
@base.deploy_step(priority=0, argsinfo=_FIRMWARE_UPDATE_ARGSINFO)
@base.clean_step(priority=0, abortable=False,
diff --git a/ironic/drivers/modules/ilo/vendor.py b/ironic/drivers/modules/ilo/vendor.py
index 2f4986a2f..fa0400703 100644
--- a/ironic/drivers/modules/ilo/vendor.py
+++ b/ironic/drivers/modules/ilo/vendor.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,16 +26,14 @@ from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
+from ironic.drivers.modules.redfish import vendor as redfish_vendor
METRICS = metrics_utils.get_metrics_logger(__name__)
-class VendorPassthru(base.VendorInterface):
+class VendorPassthru(redfish_vendor.RedfishVendorPassthru):
"""Vendor-specific interfaces for iLO deploy drivers."""
- def get_properties(self):
- return {}
-
@METRICS.timer('IloVendorPassthru.validate')
def validate(self, task, method, **kwargs):
"""Validate vendor-specific actions.
@@ -50,10 +49,26 @@ class VendorPassthru(base.VendorInterface):
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
+ :raises: IloOperationNotSupported, if the driver does not support the
+ given operation with ilo vendor interface.
"""
if method == 'boot_into_iso':
self._validate_boot_into_iso(task, kwargs)
return
+ redfish_event_methods = ['create_subscription',
+ 'delete_subscription',
+ 'get_all_subscriptions', 'get_subscription']
+ if method in redfish_event_methods:
+ self._validate_is_it_a_supported_system(task)
+ ilo_common.parse_driver_info(task.node)
+ ilo_common.update_redfish_properties(task)
+ if method == 'eject_vmedia':
+ error_message = _(method + (
+ " can not be performed as the driver does not support "
+ "eject_vmedia through ilo vendor interface"))
+ raise exception.IloOperationNotSupported(operation=method,
+ error=error_message)
+
super(VendorPassthru, self).validate(task, method, **kwargs)
def _validate_boot_into_iso(self, task, kwargs):
@@ -99,3 +114,23 @@ class VendorPassthru(base.VendorInterface):
ilo_common.setup_vmedia(task, kwargs['boot_iso_href'],
ramdisk_options=None)
manager_utils.node_power_action(task, states.REBOOT)
+
    def _validate_is_it_a_supported_system(self, task):
        """Verify and raise an exception if it is not a supported system.

        Event methods are only supported on Gen10 and Gen10 Plus systems
        (both product names contain the substring 'Gen10').

        :param task: A TaskManager object.
        :raises: IloOperationNotSupported, if the node is not a Gen10 or
            Gen10 Plus system.
        """

        node = task.node
        ilo_object = ilo_common.get_ilo_object(node)
        product_name = ilo_object.get_product_name()
        operation = _("Event methods")
        error_message = _(operation + (
            " can not be performed as the driver does not support Event "
            "methods on the given node"))
        if 'Gen10' not in product_name:
            raise exception.IloOperationNotSupported(operation=operation,
                                                     error=error_message)
diff --git a/ironic/drivers/modules/image_utils.py b/ironic/drivers/modules/image_utils.py
index 304c199bf..86607ee25 100644
--- a/ironic/drivers/modules/image_utils.py
+++ b/ironic/drivers/modules/image_utils.py
@@ -211,6 +211,16 @@ class ImageHandler(object):
try:
os.link(image_file, published_file)
os.chmod(image_file, self._file_permission)
+ try:
+ utils.execute(
+                    '/usr/sbin/restorecon', '-i', '-R', '-v', public_dir)
+ except FileNotFoundError as exc:
+ LOG.debug(
+ "Could not restore SELinux context on "
+ "%(public_dir)s, restorecon command not found.\n"
+ "Error: %(error)s",
+ {'public_dir': public_dir,
+ 'error': exc})
except OSError as exc:
LOG.debug(
diff --git a/ironic/drivers/modules/inspect_utils.py b/ironic/drivers/modules/inspect_utils.py
index 89a13e658..0089302c1 100644
--- a/ironic/drivers/modules/inspect_utils.py
+++ b/ironic/drivers/modules/inspect_utils.py
@@ -15,11 +15,16 @@
from oslo_log import log as logging
from oslo_utils import netutils
+import swiftclient.exceptions
from ironic.common import exception
+from ironic.common import swift
+from ironic.conf import CONF
from ironic import objects
+from ironic.objects import node_inventory
LOG = logging.getLogger(__name__)
+_OBJECT_NAME_PREFIX = 'inspector_data'
def create_ports_if_not_exist(task, macs):
@@ -51,3 +56,164 @@ def create_ports_if_not_exist(task, macs):
except exception.MACAlreadyExists:
LOG.info("Port already exists for MAC address %(address)s "
"for node %(node)s", {'address': mac, 'node': node.uuid})
+
+
def clean_up_swift_entries(task):
    """Delete swift entries containing introspection data.

    Delete swift entries related to the node in task.node containing
    introspection data. The entries are
    ``inspector_data-<task.node.uuid>-inventory`` for hardware inventory and
    similar for ``-plugin`` containing the rest of the introspection data.

    :param task: A TaskManager instance.
    :raises: SwiftObjectStillExists if an existing entry cannot be deleted.
    """
    if CONF.inventory.data_backend != 'swift':
        return
    swift_api = swift.SwiftAPI()
    swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, task.node.uuid)
    container = CONF.inventory.swift_data_container

    def _delete(object_name):
        # Delete one object; a 404 simply means it was never created,
        # which is acceptable.
        try:
            swift_api.delete_object(object_name, container)
        except swiftclient.exceptions.ClientException as e:
            if e.http_status == 404:
                return
            # Fixed typo: "expection" -> "exception".
            LOG.error("Object %(obj)s related to node %(node)s "
                      "failed to be deleted with exception: %(e)s",
                      {'obj': object_name, 'node': task.node.uuid,
                       'e': e})
            raise exception.SwiftObjectStillExists(obj=object_name,
                                                   node=task.node.uuid)

    _delete(swift_object_name + '-inventory')
    _delete(swift_object_name + '-plugin')
+
+
def store_introspection_data(node, introspection_data, context):
    """Store introspection data.

    Persist the introspection data for a node, either to the database
    or to swift, as configured.

    :param node: the Ironic node that the introspection data is about
    :param introspection_data: the data to store; its ``inventory`` key
        is removed as a side effect
    :param context: an admin context
    """
    backend = CONF.inventory.data_backend
    # If the backend is 'none', do not store the data.
    if backend == 'none':
        LOG.debug('Introspection data storage is disabled, the data will '
                  'not be saved for node %(node)s', {'node': node.uuid})
        return
    # The hardware inventory is stored separately from the rest of the
    # introspection data.
    inventory_data = introspection_data.pop("inventory")
    plugin_data = introspection_data
    if backend == 'database':
        node_inventory.NodeInventory(
            context,
            node_id=node.id,
            inventory_data=inventory_data,
            plugin_data=plugin_data).create()
        LOG.info('Introspection data was stored in database for node '
                 '%(node)s', {'node': node.uuid})
    if backend == 'swift':
        swift_object_name = _store_introspection_data_in_swift(
            node_uuid=node.uuid,
            inventory_data=inventory_data,
            plugin_data=plugin_data)
        LOG.info('Introspection data was stored for node %(node)s in Swift'
                 ' object %(obj_name)s-inventory and %(obj_name)s-plugin',
                 {'node': node.uuid, 'obj_name': swift_object_name})
+
+
+def _node_inventory_convert(node_inventory):
+ inventory_data = node_inventory['inventory_data']
+ plugin_data = node_inventory['plugin_data']
+ return {"inventory": inventory_data, "plugin_data": plugin_data}
+
+
def get_introspection_data(node, context):
    """Get introspection data.

    Retrieve the introspection data for a node, either from the database
    or from swift, as configured.

    :param node: the Ironic node that the required data is about
    :param context: an admin context
    :returns: dictionary with ``inventory`` and ``plugin_data`` fields
    :raises: NodeInventoryNotFound if no data is stored for the node, or
        if storage is disabled (``data_backend`` is ``none``)
    """
    store_data = CONF.inventory.data_backend
    if store_data == 'none':
        raise exception.NodeInventoryNotFound(node=node.uuid)
    if store_data == 'database':
        node_inventory = objects.NodeInventory.get_by_node_id(
            context, node.id)
        return _node_inventory_convert(node_inventory)
    if store_data == 'swift':
        try:
            node_inventory = _get_introspection_data_from_swift(node.uuid)
        except exception.SwiftObjectNotFoundError:
            raise exception.NodeInventoryNotFound(node=node.uuid)
        return node_inventory
+
+
def _store_introspection_data_in_swift(node_uuid, inventory_data, plugin_data):
    """Uploads introspection data to Swift.

    Two objects are created: ``<prefix>-<node_uuid>-inventory`` for the
    hardware inventory and ``<prefix>-<node_uuid>-plugin`` for the rest.

    :param node_uuid: UUID of the Ironic node that the data came from
    :param inventory_data: hardware inventory data to store
    :param plugin_data: remaining introspection data to store
    :returns: common name prefix of the Swift objects the data is stored in
    """
    swift_api = swift.SwiftAPI()
    swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
    container = CONF.inventory.swift_data_container
    swift_api.create_object_from_data(swift_object_name + '-inventory',
                                      inventory_data,
                                      container)
    swift_api.create_object_from_data(swift_object_name + '-plugin',
                                      plugin_data,
                                      container)
    return swift_object_name
+
+
def _get_introspection_data_from_swift(node_uuid):
    """Get introspection data from Swift.

    :param node_uuid: UUID of the Ironic node that the data came from
    :returns: dictionary with ``inventory`` and ``plugin_data`` fields
    :raises: SwiftObjectNotFoundError if either object cannot be fetched
    """
    swift_api = swift.SwiftAPI()
    swift_object_name = '%s-%s' % (_OBJECT_NAME_PREFIX, node_uuid)
    container = CONF.inventory.swift_data_container
    fetched = {}
    # Both halves of the data live in separate Swift objects; fetch each
    # and translate any swift failure into SwiftObjectNotFoundError.
    for field, suffix in (("inventory", '-inventory'),
                          ("plugin_data", '-plugin')):
        object_name = swift_object_name + suffix
        try:
            fetched[field] = swift_api.get_object(object_name, container)
        except exception.SwiftOperationError:
            LOG.error("Failed to retrieve object %(obj)s from swift",
                      {'obj': object_name})
            raise exception.SwiftObjectNotFoundError(obj=object_name,
                                                     container=container,
                                                     operation='get')
    return fetched
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py
index 833934793..dbf171714 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector.py
@@ -339,7 +339,8 @@ def _check_status(task):
task.node.uuid)
try:
- status = _get_client(task.context).get_introspection(node.uuid)
+ inspector_client = _get_client(task.context)
+ status = inspector_client.get_introspection(node.uuid)
except Exception:
# NOTE(dtantsur): get_status should not normally raise
# let's assume it's a transient failure and retry later
@@ -363,6 +364,15 @@ def _check_status(task):
_inspection_error_handler(task, error)
elif status.is_finished:
_clean_up(task)
+ store_data = CONF.inventory.data_backend
+ if store_data == 'none':
+ LOG.debug('Introspection data storage is disabled, the data will '
+ 'not be saved for node %(node)s', {'node': node.uuid})
+ return
+ introspection_data = inspector_client.get_introspection_data(
+ node.uuid, processed=True)
+ inspect_utils.store_introspection_data(node, introspection_data,
+ task.context)
def _clean_up(task):
diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py
index 87420f369..218f90a5c 100644
--- a/ironic/drivers/modules/ipmitool.py
+++ b/ironic/drivers/modules/ipmitool.py
@@ -1556,6 +1556,9 @@ class IPMIShellinaboxConsole(IPMIConsole):
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
+        # Deallocate the previously allocated port, if any, so the same
+        # host can never have duplicated ports.
+ _release_allocated_port(task)
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
driver_info['port'] = _allocate_port(task)
@@ -1611,6 +1614,9 @@ class IPMISocatConsole(IPMIConsole):
created
:raises: ConsoleSubprocessFailed when invoking the subprocess failed
"""
+        # Deallocate the previously allocated port, if any, so the same
+        # host can never have duplicated ports.
+ _release_allocated_port(task)
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
driver_info['port'] = _allocate_port(
diff --git a/ironic/drivers/modules/irmc/common.py b/ironic/drivers/modules/irmc/common.py
index 7a8fc0f1d..4341a82f4 100644
--- a/ironic/drivers/modules/irmc/common.py
+++ b/ironic/drivers/modules/irmc/common.py
@@ -15,9 +15,12 @@
"""
Common functionalities shared between different iRMC modules.
"""
+import json
import os
+import re
from oslo_log import log as logging
+from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
@@ -31,6 +34,23 @@ scci = importutils.try_import('scciclient.irmc.scci')
elcm = importutils.try_import('scciclient.irmc.elcm')
LOG = logging.getLogger(__name__)
+
+
+IRMC_OS_NAME_R = re.compile(r'iRMC\s+S\d+')
+IRMC_OS_NAME_NUM_R = re.compile(r'\d+$')
+IRMC_FW_VER_R = re.compile(r'\d(\.\d+)*\w*')
+IRMC_FW_VER_NUM_R = re.compile(r'\d(\.\d+)*')
+
+IPMI_ENABLED_BY_DEFAULT_RANGES = {
+ # iRMC S4 enables IPMI over LAN by default
+ '4': None,
+ # iRMC S5 enables IPMI over LAN by default
+ '5': None,
+ # iRMC S6 disables IPMI over LAN by default from version 2.00
+ '6': {'upper': '2.00'}}
+
+ELCM_STATUS_PATH = '/rest/v1/Oem/eLCM/eLCMStatus'
+
REQUIRED_PROPERTIES = {
'irmc_address': _("IP address or hostname of the iRMC. Required."),
'irmc_username': _("Username for the iRMC with administrator privileges. "
@@ -83,7 +103,9 @@ SNMP_V3_REQUIRED_PROPERTIES = {
SNMP_V3_OPTIONAL_PROPERTIES = {
'irmc_snmp_auth_proto': _("SNMPv3 message authentication protocol ID. "
"Required for version 'v3'. "
- "'sha' is supported."),
+                              "If using iRMC S4/S5, only 'sha' is supported. "
+ "If using iRMC S6, the valid options are "
+ "'sha256', 'sha384', 'sha512'."),
'irmc_snmp_priv_proto': _("SNMPv3 message privacy (encryption) protocol "
"ID. Required for version 'v3'. "
"'aes' is supported."),
@@ -243,7 +265,8 @@ def _parse_snmp_driver_info(node, info):
def _parse_snmp_v3_info(node, info):
snmp_info = {}
missing_info = []
- valid_values = {'irmc_snmp_auth_proto': ['sha'],
+ valid_values = {'irmc_snmp_auth_proto': ['sha', 'sha256', 'sha384',
+ 'sha512'],
'irmc_snmp_priv_proto': ['aes']}
valid_protocols = {'irmc_snmp_auth_proto': snmp.snmp_auth_protocols,
'irmc_snmp_priv_proto': snmp.snmp_priv_protocols}
@@ -433,3 +456,202 @@ def set_secure_boot_mode(node, enable):
raise exception.IRMCOperationError(
operation=_("setting secure boot mode"),
error=irmc_exception)
+
+
def check_elcm_license(node):
    """Connect to iRMC and return status of eLCM license

    This function connects to iRMC REST API and check whether eLCM
    license is active. This function can be used to check connection to
    iRMC REST API.

    :param node: An ironic node object
    :returns: dictionary whose keys are 'active' and 'status_code'.
        value of 'active' is boolean showing if eLCM license is active
        and value of 'status_code' is int which is HTTP return code
        from iRMC REST API access
    :raises: InvalidParameterValue if invalid value is contained
        in the 'driver_info' property.
    :raises: MissingParameterValue if some mandatory key is missing
        in the 'driver_info' property.
    :raises: IRMCOperationError if the operation fails.
    """
    try:
        d_info = parse_driver_info(node)
        # GET to /rest/v1/Oem/eLCM/eLCMStatus returns
        # JSON data like this:
        #
        # {
        #     "eLCMStatus":{
        #         "EnabledAndLicenced":"true",
        #         "SDCardMounted":"false"
        #     }
        # }
        #
        # EnabledAndLicenced tells whether eLCM license is valid
        #
        r = elcm.elcm_request(d_info, 'GET', ELCM_STATUS_PATH)

        # If r.status_code is 200, it means success and r.text is JSON.
        # If it is 500, it means there is problem at iRMC side
        # and iRMC cannot return eLCM status.
        # If it was 401, elcm_request raises SCCIClientError.
        # Otherwise, r.text may not be JSON.
        if r.status_code == 200:
            license_active = strutils.bool_from_string(
                jsonutils.loads(r.text)['eLCMStatus']['EnabledAndLicenced'],
                strict=True)
        else:
            license_active = False

        return {'active': license_active, 'status_code': r.status_code}
    except (scci.SCCIError,
            json.JSONDecodeError,
            TypeError,
            KeyError,
            ValueError) as irmc_exception:
        # Fixed log placeholder: '$(node)s' was not a valid oslo.log
        # '%(node)s' substitution, so the node UUID never got
        # interpolated into the message.
        LOG.error("Failed to check eLCM license status for node %(node)s",
                  {'node': node.uuid})
        raise exception.IRMCOperationError(
            operation='checking eLCM license status',
            error=irmc_exception)
+
+
def set_irmc_version(task):
    """Fetch and save iRMC firmware version.

    This function should be called before calling any other functions which
    need to check node's iRMC firmware version.

    Set `<iRMC OS>/<fw version>` to driver_internal_info['irmc_fw_version']

    :param task: a TaskManager instance; the version is saved on its node.
    :raises: InvalidParameterValue if invalid value is contained
        in the 'driver_info' property.
    :raises: MissingParameterValue if some mandatory key is missing
        in the 'driver_info' property.
    :raises: IRMCOperationError if the operation fails.
    :raises: NodeLocked if the target node is already locked.
    """

    node = task.node
    try:
        report = get_irmc_report(node)
        irmc_os, fw_version = scci.get_irmc_version_str(report)

        fw_ver = node.driver_internal_info.get('irmc_fw_version')
        if fw_ver != '/'.join([irmc_os, fw_version]):
            # Only take the exclusive lock when the stored value is stale.
            task.upgrade_lock(purpose='saving firmware version')
            node.set_driver_internal_info('irmc_fw_version',
                                          f"{irmc_os}/{fw_version}")
            node.save()
    except scci.SCCIError as irmc_exception:
        LOG.error("Failed to fetch iRMC FW version for node %s",
                  node.uuid)
        raise exception.IRMCOperationError(
            operation=_("fetching irmc fw version "),
            error=irmc_exception)
+
+
+def _version_lt(v1, v2):
+ v1_l = v1.split('.')
+ v2_l = v2.split('.')
+ if len(v1_l) <= len(v2_l):
+ v1_l.extend(['0'] * (len(v2_l) - len(v1_l)))
+ else:
+ v2_l.extend(['0'] * (len(v1_l) - len(v2_l)))
+
+ for i in range(len(v1_l)):
+ if int(v1_l[i]) < int(v2_l[i]):
+ return True
+ elif int(v1_l[i]) > int(v2_l[i]):
+ return False
+ else:
+ return False
+
+
+def _version_le(v1, v2):
+ v1_l = v1.split('.')
+ v2_l = v2.split('.')
+ if len(v1_l) <= len(v2_l):
+ v1_l.extend(['0'] * (len(v2_l) - len(v1_l)))
+ else:
+ v2_l.extend(['0'] * (len(v1_l) - len(v2_l)))
+
+ for i in range(len(v1_l)):
+ if int(v1_l[i]) < int(v2_l[i]):
+ return True
+ elif int(v1_l[i]) > int(v2_l[i]):
+ return False
+ else:
+ return True
+
+
def within_version_ranges(node, version_ranges):
    """Read saved iRMC FW version and check if it is within the passed ranges.

    :param node: An ironic node object
    :param version_ranges: A Python dictionary containing version ranges in
        the next format: <os_n>: <ranges>, where <os_n> is a string
        representing iRMC OS number (e.g. '4') and <ranges> is a dictionary
        indicating the specific firmware version ranges under the iRMC OS
        number <os_n>.

        The dictionary used in <ranges> only has two keys: 'min' and 'upper',
        and value of each key is a string representing iRMC firmware version
        number or None. Both keys can be absent and their value can be None.

        It is acceptable to not set ranges for a <os_n> (for example set
        <ranges> to None, {}, etc...), in this case, this function only
        checks if the node's iRMC OS number matches the <os_n>.

        Valid <version_ranges> example:
            {'3': None, # all versions of iRMC S3 match
             '4': {}, # all versions of iRMC S4 match
             # all versions of iRMC S5 match
             '5': {'min': None, 'upper': None},
             # iRMC S6 whose version is >=1.20 matches
             '6': {'min': '1.20', 'upper': None},
             # iRMC S7 whose version is
             # 5.51<= (version) <8.23 matches
             '7': {'min': '5.51', 'upper': '8.23'}}

    :returns: True if node's iRMC FW is in range, False if not or
        fails to parse firmware version
    """

    try:
        # Value was stored by set_irmc_version() as '<iRMC OS>/<fw version>'.
        fw_version = node.driver_internal_info.get('irmc_fw_version', '')
        irmc_os, irmc_ver = fw_version.split('/')

        if IRMC_OS_NAME_R.match(irmc_os) and IRMC_FW_VER_R.match(irmc_ver):
            os_num = IRMC_OS_NAME_NUM_R.search(irmc_os).group(0)
            fw_num = IRMC_FW_VER_NUM_R.search(irmc_ver).group(0)

            if os_num not in version_ranges:
                return False

            v_range = version_ranges[os_num]

            # An OS number with no ranges set means no need to check a
            # specific version: every version under this OS number is valid.
            if not v_range:
                return True

            # A specific range is set; check if the node's
            # firmware version is within it.
            min_ver = v_range.get('min')
            upper_ver = v_range.get('upper')
            flag = True
            if min_ver:
                flag = _version_le(min_ver, fw_num)
            if flag and upper_ver:
                flag = _version_lt(fw_num, upper_ver)
            return flag

    except Exception:
        # All exceptions are ignored; fall through to the warning below.
        pass

    # NOTE(review): fw_version could be unbound here if the very first
    # driver_internal_info access raised — confirm and initialize if so.
    LOG.warning('Failed to parse iRMC firmware version on node %(uuid)s: '
                '%(fw_ver)s', {'uuid': node.uuid, 'fw_ver': fw_version})
    return False
diff --git a/ironic/drivers/modules/irmc/inspect.py b/ironic/drivers/modules/irmc/inspect.py
index 9b6bff5bc..f7c2ad7ba 100644
--- a/ironic/drivers/modules/irmc/inspect.py
+++ b/ironic/drivers/modules/irmc/inspect.py
@@ -32,7 +32,7 @@ from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules import snmp
from ironic import objects
-scci = importutils.try_import('scciclient.irmc.scci')
+irmc = importutils.try_import('scciclient.irmc')
LOG = logging.getLogger(__name__)
@@ -122,6 +122,39 @@ def _get_mac_addresses(node):
if c == NODE_CLASS_OID_VALUE['primary']]
+def _get_capabilities_properties_without_ipmi(d_info, cap_props,
+ current_cap, props):
+ capabilities = {}
+ snmp_client = snmp.SNMPClient(
+ address=d_info['irmc_address'],
+ port=d_info['irmc_snmp_port'],
+ version=d_info['irmc_snmp_version'],
+ read_community=d_info['irmc_snmp_community'],
+ user=d_info.get('irmc_snmp_user'),
+ auth_proto=d_info.get('irmc_snmp_auth_proto'),
+ auth_key=d_info.get('irmc_snmp_auth_password'),
+ priv_proto=d_info.get('irmc_snmp_priv_proto'),
+ priv_key=d_info.get('irmc_snmp_priv_password'))
+
+ if 'rom_firmware_version' in cap_props:
+ capabilities['rom_firmware_version'] = \
+ irmc.snmp.get_bios_firmware_version(snmp_client)
+
+ if 'irmc_firmware_version' in cap_props:
+ capabilities['irmc_firmware_version'] = \
+ irmc.snmp.get_irmc_firmware_version(snmp_client)
+
+ if 'server_model' in cap_props:
+ capabilities['server_model'] = irmc.snmp.get_server_model(
+ snmp_client)
+
+ capabilities = utils.get_updated_capabilities(current_cap, capabilities)
+ if capabilities:
+ props['capabilities'] = capabilities
+
+ return props
+
+
def _inspect_hardware(node, existing_traits=None, **kwargs):
"""Inspect the node and get hardware information.
@@ -161,39 +194,50 @@ def _inspect_hardware(node, existing_traits=None, **kwargs):
try:
report = irmc_common.get_irmc_report(node)
- props = scci.get_essential_properties(
+ props = irmc.scci.get_essential_properties(
report, IRMCInspect.ESSENTIAL_PROPERTIES)
d_info = irmc_common.parse_driver_info(node)
- capabilities = scci.get_capabilities_properties(
- d_info,
- capabilities_props,
- gpu_ids,
- fpga_ids=fpga_ids,
- **kwargs)
- if capabilities:
- if capabilities.get('pci_gpu_devices') == 0:
- capabilities.pop('pci_gpu_devices')
-
- cpu_fpga = capabilities.pop('cpu_fpga', 0)
- if cpu_fpga == 0 and 'CUSTOM_CPU_FPGA' in new_traits:
- new_traits.remove('CUSTOM_CPU_FPGA')
- elif cpu_fpga != 0 and 'CUSTOM_CPU_FPGA' not in new_traits:
- new_traits.append('CUSTOM_CPU_FPGA')
-
- # Ironic no longer supports trusted boot
- capabilities.pop('trusted_boot', None)
- capabilities = utils.get_updated_capabilities(
- node.properties.get('capabilities'), capabilities)
+ if node.driver_internal_info.get('irmc_ipmi_succeed'):
+ capabilities = irmc.scci.get_capabilities_properties(
+ d_info,
+ capabilities_props,
+ gpu_ids,
+ fpga_ids=fpga_ids,
+ **kwargs)
if capabilities:
- props['capabilities'] = capabilities
+ if capabilities.get('pci_gpu_devices') == 0:
+ capabilities.pop('pci_gpu_devices')
+
+ cpu_fpga = capabilities.pop('cpu_fpga', 0)
+ if cpu_fpga == 0 and 'CUSTOM_CPU_FPGA' in new_traits:
+ new_traits.remove('CUSTOM_CPU_FPGA')
+ elif cpu_fpga != 0 and 'CUSTOM_CPU_FPGA' not in new_traits:
+ new_traits.append('CUSTOM_CPU_FPGA')
+
+ # Ironic no longer supports trusted boot
+ capabilities.pop('trusted_boot', None)
+ capabilities = utils.get_updated_capabilities(
+ node.properties.get('capabilities', ''), capabilities)
+ if capabilities:
+ props['capabilities'] = capabilities
+
+ else:
+ props = _get_capabilities_properties_without_ipmi(
+ d_info, capabilities_props,
+ node.properties.get('capabilities', ''), props)
macs = _get_mac_addresses(node)
- except (scci.SCCIInvalidInputError,
- scci.SCCIClientError,
+ except (irmc.scci.SCCIInvalidInputError,
+ irmc.scci.SCCIClientError,
exception.SNMPFailure) as e:
+ advice = ""
+ if ("SNMP operation" in str(e)):
+            advice = ("The SNMP related parameters' values may differ "
+                      "from the server's settings; please check if you "
+                      "have set them correctly.")
error = (_("Inspection failed for node %(node_id)s "
- "with the following error: %(error)s") %
- {'node_id': node.uuid, 'error': e})
+                   "with the following error: %(error)s. %(advice)s") %
+ {'node_id': node.uuid, 'error': e, 'advice': advice})
raise exception.HardwareInspectionFailure(error=error)
return props, macs, new_traits
diff --git a/ironic/drivers/modules/irmc/management.py b/ironic/drivers/modules/irmc/management.py
index 079ae9e44..cf146f2cd 100644
--- a/ironic/drivers/modules/irmc/management.py
+++ b/ironic/drivers/modules/irmc/management.py
@@ -27,9 +27,10 @@ from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic import conf
from ironic.drivers import base
+from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import common as irmc_common
-from ironic.drivers import utils as driver_utils
+from ironic.drivers.modules.redfish import management as redfish_management
irmc = importutils.try_import('scciclient.irmc')
@@ -204,7 +205,8 @@ def _restore_bios_config(task):
manager_utils.node_power_action(task, states.POWER_ON)
-class IRMCManagement(ipmitool.IPMIManagement):
+class IRMCManagement(ipmitool.IPMIManagement,
+ redfish_management.RedfishManagement):
def get_properties(self):
"""Return the properties of the interface.
@@ -224,9 +226,30 @@ class IRMCManagement(ipmitool.IPMIManagement):
:raises: InvalidParameterValue if required parameters are invalid.
:raises: MissingParameterValue if a required parameter is missing.
"""
- irmc_common.parse_driver_info(task.node)
- irmc_common.update_ipmi_properties(task)
- super(IRMCManagement, self).validate(task)
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ irmc_common.parse_driver_info(task.node)
+ irmc_common.update_ipmi_properties(task)
+ super(IRMCManagement, self).validate(task)
+ else:
+ irmc_common.parse_driver_info(task.node)
+ super(ipmitool.IPMIManagement, self).validate(task)
+
+ def get_supported_boot_devices(self, task):
+ """Get list of supported boot devices
+
+ Actual code is delegated to IPMIManagement or RedfishManagement
+ based on iRMC firmware version.
+
+ :param task: A TaskManager instance
+ :returns: A list with the supported boot devices defined
+ in :mod:`ironic.common.boot_devices`.
+
+ """
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ return super(IRMCManagement, self).get_supported_boot_devices(task)
+ else:
+ return super(ipmitool.IPMIManagement,
+ self).get_supported_boot_devices(task)
@METRICS.timer('IRMCManagement.set_boot_device')
@task_manager.require_exclusive_lock
@@ -245,39 +268,112 @@ class IRMCManagement(ipmitool.IPMIManagement):
specified.
:raises: MissingParameterValue if a required parameter is missing.
:raises: IPMIFailure on an error from ipmitool.
+ :raises: RedfishConnectionError on Redfish operation failure.
+ :raises: RedfishError on Redfish operation failure.
+ """
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ if device not in self.get_supported_boot_devices(task):
+ raise exception.InvalidParameterValue(_(
+ "Invalid boot device %s specified.") % device)
+
+ uefi_mode = (
+ boot_mode_utils.get_boot_mode(task.node) == 'uefi')
+
+ # disable 60 secs timer
+ timeout_disable = "0x00 0x08 0x03 0x08"
+ ipmitool.send_raw(task, timeout_disable)
+
+ # note(naohirot):
+ # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'
+ #
+ # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00
+ #
+ # data1 : '0xe0' persistent + uefi
+ # '0xc0' persistent + bios
+ # '0xa0' next only + uefi
+ # '0x80' next only + bios
+ # data2 : boot device defined in the dict _BOOTPARAM5_DATA2
+
+ bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'
+ if persistent:
+ data1 = '0xe0' if uefi_mode else '0xc0'
+ else:
+ data1 = '0xa0' if uefi_mode else '0x80'
+ data2 = _BOOTPARAM5_DATA2[device]
+
+ cmd8 = bootparam5 % (data1, data2)
+ ipmitool.send_raw(task, cmd8)
+ else:
+ if device not in self.get_supported_boot_devices(task):
+ raise exception.InvalidParameterValue(_(
+ "Invalid boot device %s specified. "
+ "Current iRMC firmware condition doesn't support IPMI "
+ "but Redfish.") % device)
+ super(ipmitool.IPMIManagement, self).set_boot_device(
+ task, device, persistent)
+
+ def get_boot_device(self, task):
+ """Get the current boot device for the task's node.
+ Returns the current boot device of the node.
+
+ :param task: a task from TaskManager.
+ :raises: InvalidParameterValue if an invalid boot device is
+ specified.
+ :raises: MissingParameterValue if a required parameter is missing.
+ :raises: IPMIFailure on an error from ipmitool.
+ :raises: RedfishConnectionError on Redfish operation failure.
+ :raises: RedfishError on Redfish operation failure.
+ :returns: a dictionary containing:
+
+ :boot_device: the boot device, one of
+ :mod:`ironic.common.boot_devices` or None if it is unknown.
+ :persistent: Whether the boot device will persist to all
+ future boots or not, None if it is unknown.
"""
- if device not in self.get_supported_boot_devices(task):
- raise exception.InvalidParameterValue(_(
- "Invalid boot device %s specified.") % device)
-
- uefi_mode = (
- driver_utils.get_node_capability(task.node, 'boot_mode') == 'uefi')
-
- # disable 60 secs timer
- timeout_disable = "0x00 0x08 0x03 0x08"
- ipmitool.send_raw(task, timeout_disable)
-
- # note(naohirot):
- # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'
- #
- # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00
- #
- # data1 : '0xe0' persistent + uefi
- # '0xc0' persistent + bios
- # '0xa0' next only + uefi
- # '0x80' next only + bios
- # data2 : boot device defined in the dict _BOOTPARAM5_DATA2
-
- bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'
- if persistent:
- data1 = '0xe0' if uefi_mode else '0xc0'
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ return super(IRMCManagement, self).get_boot_device(task)
else:
- data1 = '0xa0' if uefi_mode else '0x80'
- data2 = _BOOTPARAM5_DATA2[device]
+ return super(
+ ipmitool.IPMIManagement, self).get_boot_device(task)
- cmd8 = bootparam5 % (data1, data2)
- ipmitool.send_raw(task, cmd8)
+ def get_supported_boot_modes(self, task):
+ """Get a list of the supported boot modes.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: a task from TaskManager.
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='get_supported_boot_modes')
+
+ def set_boot_mode(self, task, mode):
+ """Set the boot mode for a node.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: a task from TaskManager.
+ :param mode: The boot mode, one of
+ :mod:`ironic.common.boot_modes`.
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='set_boot_mode')
+
+ def get_boot_mode(self, task):
+ """Get the current boot mode for a node.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: a task from TaskManager.
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='get_boot_mode')
@METRICS.timer('IRMCManagement.get_sensors_data')
def get_sensors_data(self, task):
@@ -330,7 +426,13 @@ class IRMCManagement(ipmitool.IPMIManagement):
if sensor_method == 'scci':
return _get_sensors_data(task)
elif sensor_method == 'ipmitool':
- return super(IRMCManagement, self).get_sensors_data(task)
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ return super(IRMCManagement, self).get_sensors_data(task)
+ else:
+ raise exception.InvalidParameterValue(_(
+ "Invalid sensor method %s specified. "
+ "IPMI operation doesn't work on current iRMC "
+ "condition.") % sensor_method)
@METRICS.timer('IRMCManagement.inject_nmi')
@task_manager.require_exclusive_lock
@@ -401,3 +503,120 @@ class IRMCManagement(ipmitool.IPMIManagement):
not supported by the driver or the hardware
"""
return irmc_common.set_secure_boot_mode(task.node, state)
+
+ def get_supported_indicators(self, task, component=None):
+ """Get a map of the supported indicators (e.g. LEDs).
+
+ IRMCManagement class doesn't support this method
+
+ :param task: a task from TaskManager.
+ :param component: If not `None`, return indicator information
+ for just this component, otherwise return indicators for
+ all existing components.
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='get_supported_indicators')
+
+ def set_indicator_state(self, task, component, indicator, state):
+ """Set indicator on the hardware component to the desired state.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: A task from TaskManager.
+ :param component: The hardware component, one of
+ :mod:`ironic.common.components`.
+ :param indicator: Indicator ID (as reported by
+ `get_supported_indicators`).
+ :state: Desired state of the indicator, one of
+ :mod:`ironic.common.indicator_states`.
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='set_indicator_state')
+
+ def get_indicator_state(self, task, component, indicator):
+ """Get current state of the indicator of the hardware component.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: A task from TaskManager.
+ :param component: The hardware component, one of
+ :mod:`ironic.common.components`.
+ :param indicator: Indicator ID (as reported by
+ `get_supported_indicators`).
+ :raises: UnsupportedDriverExtension if requested operation is
+ not supported by the driver
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='get_indicator_state')
+
+ def detect_vendor(self, task):
+ """Detects and returns the hardware vendor.
+
+ :param task: A task from TaskManager.
+ :raises: InvalidParameterValue if a required parameter is missing
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: RedfishError on Redfish operation error.
+ :raises: PasswordFileFailedToCreate from creating or writing to the
+ temporary file during IPMI operation.
+ :raises: processutils.ProcessExecutionError from executing ipmi command
+ :returns: String representing the BMC reported Vendor or
+ Manufacturer, otherwise returns None.
+ """
+ if task.node.driver_internal_info.get('irmc_ipmi_succeed'):
+ return super(IRMCManagement, self).detect_vendor(task)
+ else:
+ return super(ipmitool.IPMIManagement, self).detect_vendor(task)
+
+ def get_mac_addresses(self, task):
+ """Get MAC address information for the node.
+
+ IRMCManagement class doesn't support this method
+
+ :param task: A TaskManager instance containing the node to act on.
+ :raises: UnsupportedDriverExtension
+ """
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='get_mac_addresses')
+
+ @base.verify_step(priority=10)
+ def verify_http_https_connection_and_fw_version(self, task):
+ """Check http(s) connection to iRMC and save fw version
+
+        :param task: A task from TaskManager
+        :raises: IRMCOperationError
+ """
+ error_msg_https = ('Access to REST API returns unexpected '
+ 'status code. Check driver_info parameter '
+ 'related to iRMC driver')
+ error_msg_http = ('Access to REST API returns unexpected '
+ 'status code. Check driver_info parameter '
+ 'or version of iRMC because iRMC does not '
+ 'support HTTP connection to iRMC REST API '
+                          'as of iRMC S6 2.00.')
+ try:
+ # Check connection to iRMC
+ elcm_license = irmc_common.check_elcm_license(task.node)
+
+ # On iRMC S6 2.00, access to REST API through HTTP returns 404
+ if elcm_license.get('status_code') not in (200, 500):
+ port = task.node.driver_info.get(
+ 'irmc_port', CONF.irmc.get('port'))
+ if port == 80:
+ e_msg = error_msg_http
+ else:
+ e_msg = error_msg_https
+ raise exception.IRMCOperationError(
+ operation='establishing connection to REST API',
+ error=e_msg)
+
+ irmc_common.set_irmc_version(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as irmc_exception:
+ raise exception.IRMCOperationError(
+ operation='configuration validation',
+ error=irmc_exception)
diff --git a/ironic/drivers/modules/irmc/power.py b/ironic/drivers/modules/irmc/power.py
index 28041d835..48f7ea321 100644
--- a/ironic/drivers/modules/irmc/power.py
+++ b/ironic/drivers/modules/irmc/power.py
@@ -29,6 +29,7 @@ from ironic.drivers import base
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import boot as irmc_boot
from ironic.drivers.modules.irmc import common as irmc_common
+from ironic.drivers.modules.redfish import power as redfish_power
from ironic.drivers.modules import snmp
scci = importutils.try_import('scciclient.irmc.scci')
@@ -203,14 +204,17 @@ def _set_power_state(task, target_state, timeout=None):
_wait_power_state(task, states.SOFT_REBOOT, timeout=timeout)
except exception.SNMPFailure as snmp_exception:
+        advice = ("The SNMP related parameters' values may differ from the "
+                  "server's; please check if you have set them correctly.")
LOG.error("iRMC failed to acknowledge the target state "
- "for node %(node_id)s. Error: %(error)s",
- {'node_id': node.uuid, 'error': snmp_exception})
+ "for node %(node_id)s. Error: %(error)s. %(advice)s",
+ {'node_id': node.uuid, 'error': snmp_exception,
+ 'advice': advice})
raise exception.IRMCOperationError(operation=target_state,
error=snmp_exception)
-class IRMCPower(base.PowerInterface):
+class IRMCPower(redfish_power.RedfishPower, base.PowerInterface):
"""Interface for power-related actions."""
def get_properties(self):
@@ -233,7 +237,19 @@ class IRMCPower(base.PowerInterface):
is missing or invalid on the node.
:raises: MissingParameterValue if a required parameter is missing.
"""
- irmc_common.parse_driver_info(task.node)
+ # validate method of power interface is called at very first point
+ # in verifying.
+ # We take try-fallback approach against iRMC S6 2.00 and later
+ # incompatibility in which iRMC firmware disables IPMI by default.
+ # get_power_state method first try IPMI and if fails try Redfish
+ # along with setting irmc_ipmi_succeed flag to indicate if IPMI works.
+ if (task.node.driver_internal_info.get('irmc_ipmi_succeed')
+ or (task.node.driver_internal_info.get('irmc_ipmi_succeed')
+ is None)):
+ irmc_common.parse_driver_info(task.node)
+ else:
+ irmc_common.parse_driver_info(task.node)
+ super(IRMCPower, self).validate(task)
@METRICS.timer('IRMCPower.get_power_state')
def get_power_state(self, task):
@@ -241,14 +257,40 @@ class IRMCPower(base.PowerInterface):
:param task: a TaskManager instance containing the node to act on.
:returns: a power state. One of :mod:`ironic.common.states`.
- :raises: InvalidParameterValue if required ipmi parameters are missing.
- :raises: MissingParameterValue if a required parameter is missing.
- :raises: IPMIFailure on an error from ipmitool (from _power_status
- call).
+ :raises: InvalidParameterValue if required parameters are incorrect.
+ :raises: MissingParameterValue if required parameters are missing.
+ :raises: IRMCOperationError If IPMI or Redfish operation fails
"""
- irmc_common.update_ipmi_properties(task)
- ipmi_power = ipmitool.IPMIPower()
- return ipmi_power.get_power_state(task)
+ # If IPMI operation failed, iRMC may not enable/support IPMI,
+ # so fallback to Redfish.
+ # get_power_state is called at verifying and is called periodically
+ # so this method is good choice to determine IPMI enablement.
+ try:
+ irmc_common.update_ipmi_properties(task)
+ ipmi_power = ipmitool.IPMIPower()
+ pw_state = ipmi_power.get_power_state(task)
+ if (task.node.driver_internal_info.get('irmc_ipmi_succeed')
+ is not True):
+ task.upgrade_lock(purpose='update irmc_ipmi_succeed flag',
+ retry=True)
+ task.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ task.node.save()
+ task.downgrade_lock()
+ return pw_state
+ except exception.IPMIFailure:
+ if (task.node.driver_internal_info.get('irmc_ipmi_succeed')
+ is not False):
+ task.upgrade_lock(purpose='update irmc_ipmi_succeed flag',
+ retry=True)
+ task.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ task.node.save()
+ task.downgrade_lock()
+ try:
+ return super(IRMCPower, self).get_power_state(task)
+ except (exception.RedfishConnectionError,
+ exception.RedfishError):
+ raise exception.IRMCOperationError(
+ operation='IPMI try and Redfish fallback operation')
@METRICS.timer('IRMCPower.set_power_state')
@task_manager.require_exclusive_lock
diff --git a/ironic/drivers/modules/irmc/vendor.py b/ironic/drivers/modules/irmc/vendor.py
new file mode 100644
index 000000000..35535f69d
--- /dev/null
+++ b/ironic/drivers/modules/irmc/vendor.py
@@ -0,0 +1,75 @@
+# Copyright 2022 FUJITSU LIMITED
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Vendor interface of iRMC driver
+"""
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.drivers import base
+from ironic.drivers.modules.irmc import common as irmc_common
+
+
+class IRMCVendorPassthru(base.VendorInterface):
+ def get_properties(self):
+ """Return the properties of the interface.
+
+ :returns: Dictionary of <property name>:<property description> entries.
+ """
+ return irmc_common.COMMON_PROPERTIES
+
+ def validate(self, task, method=None, **kwargs):
+ """Validate vendor-specific actions.
+
+ This method validates whether the 'driver_info' property of the
+ supplied node contains the required information for this driver.
+
+ :param task: An instance of TaskManager.
+ :param method: Name of vendor passthru method
+ :raises: InvalidParameterValue if invalid value is contained
+ in the 'driver_info' property.
+ :raises: MissingParameterValue if some mandatory key is missing
+ in the 'driver_info' property.
+ """
+ irmc_common.parse_driver_info(task.node)
+
+ @base.passthru(['POST'],
+ async_call=True,
+ description='Connect to iRMC and fetch iRMC firmware '
+ 'version and, if firmware version has not been cached '
+ 'in or actual firmware version is different from one in '
+ 'driver_internal_info/irmc_fw_version, store firmware '
+ 'version in driver_internal_info/irmc_fw_version.',
+ attach=False,
+ require_exclusive_lock=False)
+ def cache_irmc_firmware_version(self, task, **kwargs):
+ """Fetch and save iRMC firmware version.
+
+        This method connects to iRMC and fetches the iRMC firmware version.
+ If fetched firmware version is not cached in or is different from
+ one in driver_internal_info/irmc_fw_version, store fetched version
+ in driver_internal_info/irmc_fw_version.
+
+ :param task: An instance of TaskManager.
+ :raises: IRMCOperationError if some error occurs
+ """
+ try:
+ irmc_common.set_irmc_version(task)
+ except (exception.IRMCOperationError,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.NodeLocked) as e:
+ raise exception.IRMCOperationError(
+ operation=_('caching firmware version'), error=e)
diff --git a/ironic/drivers/modules/ks.cfg.template b/ironic/drivers/modules/ks.cfg.template
index ca799953a..93788fdb8 100644
--- a/ironic/drivers/modules/ks.cfg.template
+++ b/ironic/drivers/modules/ks.cfg.template
@@ -36,11 +36,11 @@ liveimg --url {{ ks_options.liveimg_url }}
# Following %pre and %onerror sections are mandatory
%pre
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "start", "agent_status_message": "Deployment starting. Running pre-installation scripts."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "start", "agent_status_message": "Deployment starting. Running pre-installation scripts."}' {{ ks_options.heartbeat_url }}
%end
%onerror
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Deploying using anaconda. Check console for more information."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "error", "agent_status_message": "Error: Deploying using anaconda. Check console for more information."}' {{ ks_options.heartbeat_url }}
%end
# Config-drive information, if any.
@@ -54,5 +54,5 @@ liveimg --url {{ ks_options.liveimg_url }}
# before rebooting.
%post
sync
-/usr/bin/curl -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "end", "agent_status_message": "Deployment completed successfully."}' {{ ks_options.heartbeat_url }}
+/usr/bin/curl {% if 'insecure_heartbeat' in ks_options %}--insecure{% endif %} -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'X-OpenStack-Ironic-API-Version: 1.72' -d '{"callback_url": "", "agent_token": "{{ ks_options.agent_token }}", "agent_status": "end", "agent_status_message": "Deployment completed successfully."}' {{ ks_options.heartbeat_url }}
%end
diff --git a/ironic/drivers/modules/pxe_grub_config.template b/ironic/drivers/modules/pxe_grub_config.template
index d8fc48673..3bcdf55d6 100644
--- a/ironic/drivers/modules/pxe_grub_config.template
+++ b/ironic/drivers/modules/pxe_grub_config.template
@@ -15,3 +15,8 @@ menuentry "boot_ramdisk" {
menuentry "boot_whole_disk" {
linuxefi chain.c32 mbr:{{ DISK_IDENTIFIER }}
}
+
+menuentry "boot_anaconda" {
+ linuxefi {{ pxe_options.aki_path }} text {{ pxe_options.pxe_append_params|default("", true) }} inst.ks={{ pxe_options.ks_cfg_url }} {% if pxe_options.repo_url %}inst.repo={{ pxe_options.repo_url }}{% else %}inst.stage2={{ pxe_options.stage2_url }}{% endif %}
+ initrdefi {{ pxe_options.ari_path }}
+}
diff --git a/ironic/drivers/modules/redfish/raid.py b/ironic/drivers/modules/redfish/raid.py
index 809ec59c6..154cd53d3 100644
--- a/ironic/drivers/modules/redfish/raid.py
+++ b/ironic/drivers/modules/redfish/raid.py
@@ -1120,7 +1120,9 @@ class RedfishRAID(base.RAIDInterface):
raid_configs['pending'].setdefault(controller, []).append(
logical_disk)
- node.set_driver_internal_info('raid_configs', raid_configs)
+ # Store only when async operation
+ if reboot_required:
+ node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required
@@ -1182,7 +1184,9 @@ class RedfishRAID(base.RAIDInterface):
response.task_monitor_uri)
reboot_required = True
- node.set_driver_internal_info('raid_configs', raid_configs)
+ # Store only when async operation
+ if reboot_required:
+ node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required
diff --git a/ironic/drivers/modules/redfish/utils.py b/ironic/drivers/modules/redfish/utils.py
index 40cf33bce..e85e2ec6a 100644
--- a/ironic/drivers/modules/redfish/utils.py
+++ b/ironic/drivers/modules/redfish/utils.py
@@ -15,6 +15,7 @@
# under the License.
import collections
+import hashlib
import os
from urllib import parse as urlparse
@@ -198,43 +199,59 @@ class SessionCache(object):
_sessions = collections.OrderedDict()
def __init__(self, driver_info):
+ # Hash the password in the data structure, so we can
+ # include it in the session key.
+ # NOTE(TheJulia): Multiplying the address by 4, to ensure
+ # we meet a minimum of 16 bytes for salt.
+ pw_hash = hashlib.pbkdf2_hmac(
+ 'sha512',
+ driver_info.get('password').encode('utf-8'),
+ str(driver_info.get('address') * 4).encode('utf-8'), 40)
self._driver_info = driver_info
+ # Assemble the session key and append the hashed password to it,
+ # which forces new sessions to be established when the saved password
+ # is changed, just like the username, or address.
self._session_key = tuple(
self._driver_info.get(key)
for key in ('address', 'username', 'verify_ca')
- )
+ ) + (pw_hash.hex(),)
def __enter__(self):
try:
return self.__class__._sessions[self._session_key]
-
except KeyError:
- auth_type = self._driver_info['auth_type']
+ LOG.debug('A cached redfish session for Redfish endpoint '
+ '%(endpoint)s was not detected, initiating a session.',
+ {'endpoint': self._driver_info['address']})
- auth_class = self.AUTH_CLASSES[auth_type]
+ auth_type = self._driver_info['auth_type']
- authenticator = auth_class(
- username=self._driver_info['username'],
- password=self._driver_info['password']
- )
+ auth_class = self.AUTH_CLASSES[auth_type]
- sushy_params = {'verify': self._driver_info['verify_ca'],
- 'auth': authenticator}
- if 'root_prefix' in self._driver_info:
- sushy_params['root_prefix'] = self._driver_info['root_prefix']
- conn = sushy.Sushy(
- self._driver_info['address'],
- **sushy_params
- )
+ authenticator = auth_class(
+ username=self._driver_info['username'],
+ password=self._driver_info['password']
+ )
+
+ sushy_params = {'verify': self._driver_info['verify_ca'],
+ 'auth': authenticator}
+ if 'root_prefix' in self._driver_info:
+ sushy_params['root_prefix'] = self._driver_info['root_prefix']
+ conn = sushy.Sushy(
+ self._driver_info['address'],
+ **sushy_params
+ )
- if CONF.redfish.connection_cache_size:
- self.__class__._sessions[self._session_key] = conn
+ if CONF.redfish.connection_cache_size:
+ self.__class__._sessions[self._session_key] = conn
+ # Save a secure hash of the password into memory, so if we
+ # observe it change, we can detect the session is no longer valid.
- if (len(self.__class__._sessions)
- > CONF.redfish.connection_cache_size):
- self._expire_oldest_session()
+ if (len(self.__class__._sessions)
+ > CONF.redfish.connection_cache_size):
+ self._expire_oldest_session()
- return conn
+ return conn
def __exit__(self, exc_type, exc_val, exc_tb):
# NOTE(etingof): perhaps this session token is no good
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index 4e700c6f8..d544d5687 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -799,6 +799,341 @@ class SNMPDriverBaytechMRP27(SNMPDriverSimple):
value_power_on = 1
+class SNMPDriverServerTechSentry3(SNMPDriverBase):
+ """SNMP driver class for Server Technology Sentry 3 PDUs.
+
+ ftp://ftp.servertech.com/Pub/SNMP/sentry3/Sentry3.mib
+
+ SNMP objects for Server Technology Power PDU.
+ 1.3.6.1.4.1.1718.3.2.3.1.5.1.1.<outlet ID> outletStatus
+ Read 0=off, 1=on, 2=off wait, 3=on wait, [...more options follow]
+ 1.3.6.1.4.1.1718.3.2.3.1.11.1.1.<outlet ID> outletControlAction
+ Write 0=no action, 1=on, 2=off, 3=reboot
+ """
+
+ oid_device = (1718, 3, 2, 3, 1)
+ oid_tower_infeed_idx = (1, 1, )
+ oid_power_status = (5,)
+ oid_power_action = (11,)
+
+ status_off = 0
+ status_on = 1
+ status_off_wait = 2
+ status_on_wait = 3
+
+ value_power_on = 1
+ value_power_off = 2
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverServerTechSentry3, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.status_on, self.status_off_wait):
+ power_state = states.POWER_ON
+ elif state in (self.status_off, self.status_on_wait):
+ power_state = states.POWER_OFF
+ else:
+            LOG.warning("ServerTech Sentry3 PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverServerTechSentry4(SNMPDriverBase):
+ """SNMP driver class for Server Technology Sentry 4 PDUs.
+
+ https://www.servertech.com/support/sentry-mib-oid-tree-downloads
+
+ SNMP objects for Server Technology Power PDU.
+    1.3.6.1.4.1.1718.4.1.8.5.1.1.<outlet ID> outletStatus
+ notSet (0) fixedOn (1) idleOff (2) idleOn (3) [...more options follow]
+ pendOn (8) pendOff (9) off (10) on (11) [...more options follow]
+ eventOff (16) eventOn (17) eventReboot (18) eventShutdown (19)
+ 1.3.6.1.4.1.1718.4.1.8.5.1.2.<outlet ID> outletControlAction
+ Write 0=no action, 1=on, 2=off, 3=reboot
+ """
+
+ oid_device = (1718, 4, 1, 8, 5, 1)
+ oid_tower_infeed_idx = (1, 1, )
+ oid_power_status = (1,)
+ oid_power_action = (2,)
+
+ notSet = 0
+ fixedOn = 1
+ idleOff = 2
+ idleOn = 3
+ wakeOff = 4
+ wakeOn = 5
+ ocpOff = 6
+ ocpOn = 7
+ status_pendOn = 8
+ status_pendOff = 9
+ status_off = 10
+ status_on = 11
+ reboot = 12
+ shutdown = 13
+ lockedOff = 14
+ lockedOn = 15
+
+ value_power_on = 1
+ value_power_off = 2
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverServerTechSentry4, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.status_on, self.status_pendOn, self.idleOn):
+ power_state = states.POWER_ON
+ elif state in (self.status_off, self.status_pendOff):
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("ServerTech Sentry4 PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverRaritanPDU2(SNMPDriverBase):
+ """SNMP driver class for Raritan PDU2 PDUs.
+
+ http://support.raritan.com/px2/version-2.4.1/mibs/pdu2-mib-020400-39592.txt
+ http://cdn.raritan.com/download/PX/v1.5.20/PDU-MIB.txt
+
+ Command:
+ snmpset -v2c -c private -m+PDU2-MIB <pdu IP address> \
+ PDU2-MIB::switchingOperation.1.4 = cycle
+ snmpset -v2c -c private <pdu IP address> \
+ .1.3.6.1.4.1.13742.6.4.1.2.1.2.1.4 i 2
+ Output:
+ PDU2-MIB::switchingOperation.1.4 = INTEGER: cycle(2)
+ """
+
+ oid_device = (13742, 6, 4, 1, 2, 1)
+ oid_power_action = (2, )
+ oid_power_status = (3, )
+ oid_tower_infeed_idx = (1, )
+
+ unavailable = -1
+ status_open = 0
+ status_closed = 1
+ belowLowerCritical = 2
+ belowLowerWarning = 3
+ status_normal = 4
+ aboveUpperWarning = 5
+ aboveUpperCritical = 6
+ status_on = 7
+ status_off = 8
+ detected = 9
+ notDetected = 10
+ alarmed = 11
+ ok = 12
+ marginal = 13
+ fail = 14
+ yes = 15
+ no = 16
+ standby = 17
+ one = 18
+ two = 19
+ inSync = 20
+ outOfSync = 21
+
+ value_power_on = 1
+ value_power_off = 0
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverRaritanPDU2, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + self.oid_tower_infeed_idx + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state == self.status_on:
+ power_state = states.POWER_ON
+ elif state == self.status_off:
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("Raritan PDU2 PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
+class SNMPDriverVertivGeistPDU(SNMPDriverBase):
+ """SNMP driver class for VertivGeist NU30017L/NU30019L PDU.
+
+ https://mibs.observium.org/mib/GEIST-V5-MIB/
+
+ """
+
+ oid_device = (21239, 5, 2, 3, 5, 1)
+ oid_power_action = (6, )
+ oid_power_status = (4, )
+ oid_tower_infeed_idx = (1, )
+
+ on = 1
+ off = 2
+ on2off = 3
+ off2on = 4
+ rebootOn = 5
+    rebootOff = 6
+ unavailable = 7
+
+ value_power_on = 2
+ value_power_off = 4
+
+ def __init__(self, *args, **kwargs):
+ super(SNMPDriverVertivGeistPDU, self).__init__(*args, **kwargs)
+ # Due to its use of different OIDs for different actions, we only form
+ # an OID that holds the common substring of the OIDs for power
+ # operations.
+ self.oid_base = self.oid_enterprise + self.oid_device
+
+ def _snmp_oid(self, oid):
+ """Return the OID for one of the outlet control objects.
+
+ :param oid: The action-dependent portion of the OID, as a tuple of
+ integers.
+
+ :returns: The full OID as a tuple of integers.
+ """
+
+ outlet = self.snmp_info['outlet']
+ full_oid = self.oid_base + oid + (outlet,)
+ return full_oid
+
+ def _snmp_power_state(self):
+ oid = self._snmp_oid(self.oid_power_status)
+ state = self.client.get(oid)
+
+ # Translate the state to an Ironic power state.
+ if state in (self.on, self.on2off):
+ power_state = states.POWER_ON
+ elif state in (self.off, self.off2on):
+ power_state = states.POWER_OFF
+ else:
+ LOG.warning("Vertiv Geist PDU %(addr)s oid %(oid)s outlet "
+ "%(outlet)s: unrecognised power state %(state)s.",
+ {'addr': self.snmp_info['address'],
+ 'oid': oid,
+ 'outlet': self.snmp_info['outlet'],
+ 'state': state})
+ power_state = states.ERROR
+
+ return power_state
+
+ def _snmp_power_on(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_on)
+ self.client.set(oid, value)
+
+ def _snmp_power_off(self):
+ oid = self._snmp_oid(self.oid_power_action)
+ value = snmp.Integer(self.value_power_off)
+ self.client.set(oid, value)
+
+
class SNMPDriverAuto(SNMPDriverBase):
SYS_OBJ_OID = (1, 3, 6, 1, 2, 1, 1, 2)
@@ -878,6 +1213,10 @@ DRIVER_CLASSES = {
'eatonpower': SNMPDriverEatonPower,
'teltronix': SNMPDriverTeltronix,
'baytech_mrp27': SNMPDriverBaytechMRP27,
+ 'servertech_sentry3': SNMPDriverServerTechSentry3,
+ 'servertech_sentry4': SNMPDriverServerTechSentry4,
+ 'raritan_pdu2': SNMPDriverRaritanPDU2,
+ 'vertivgeist_pdu': SNMPDriverVertivGeistPDU,
'auto': SNMPDriverAuto,
}
diff --git a/ironic/objects/__init__.py b/ironic/objects/__init__.py
index e8de08d5a..ae0307af2 100644
--- a/ironic/objects/__init__.py
+++ b/ironic/objects/__init__.py
@@ -32,6 +32,7 @@ def register_all():
__import__('ironic.objects.deployment')
__import__('ironic.objects.node')
__import__('ironic.objects.node_history')
+ __import__('ironic.objects.node_inventory')
__import__('ironic.objects.port')
__import__('ironic.objects.portgroup')
__import__('ironic.objects.trait')
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index 7c6c8bc1a..93df5b3c1 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -78,7 +78,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.34: Add lessee field
# Version 1.35: Add network_data field
# Version 1.36: Add boot_mode and secure_boot fields
- VERSION = '1.36'
+ # Version 1.37: Add shard field
+ VERSION = '1.37'
dbapi = db_api.get_instance()
@@ -170,6 +171,7 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
'network_data': object_fields.FlexibleDictField(nullable=True),
'boot_mode': object_fields.StringField(nullable=True),
'secure_boot': object_fields.BooleanField(nullable=True),
+ 'shard': object_fields.StringField(nullable=True),
}
def as_dict(self, secure=False, mask_configdrive=True):
@@ -656,6 +658,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
should be set to empty dict (or removed).
Version 1.36: boot_mode, secure_boot were added. Defaults are None.
For versions prior to this, it should be set to None or removed.
+ Version 1.37: shard was added. Default is None. For versions prior to
+ this, it should be set to None or removed.
:param target_version: the desired version of the object
:param remove_unavailable_fields: True to remove fields that are
@@ -671,7 +675,7 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
('automated_clean', 28), ('protected_reason', 29),
('owner', 30), ('allocation_id', 31), ('description', 32),
('retired_reason', 33), ('lessee', 34), ('boot_mode', 36),
- ('secure_boot', 36)]
+ ('secure_boot', 36), ('shard', 37)]
for name, minor in fields:
self._adjust_field_to_version(name, None, target_version,
diff --git a/ironic/objects/node_inventory.py b/ironic/objects/node_inventory.py
new file mode 100644
index 000000000..eccc842dd
--- /dev/null
+++ b/ironic/objects/node_inventory.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_versionedobjects import base as object_base
+
+from ironic.db import api as dbapi
+from ironic.objects import base
+from ironic.objects import fields as object_fields
+
+
+@base.IronicObjectRegistry.register
+class NodeInventory(base.IronicObject, object_base.VersionedObjectDictCompat):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ dbapi = dbapi.get_instance()
+
+ fields = {
+ 'id': object_fields.IntegerField(),
+ 'node_id': object_fields.IntegerField(nullable=True),
+ 'inventory_data': object_fields.FlexibleDictField(nullable=True),
+ 'plugin_data': object_fields.FlexibleDictField(nullable=True),
+ }
+
+ @classmethod
+ def _from_node_object(cls, context, node):
+ """Convert a node into a virtual `NodeInventory` object."""
+ result = cls(context)
+ result._update_from_node_object(node)
+ return result
+
+ def _update_from_node_object(self, node):
+ """Update the NodeInventory object from the node."""
+ for src, dest in self.node_mapping.items():
+ setattr(self, dest, getattr(node, src, None))
+ for src, dest in self.instance_info_mapping.items():
+ setattr(self, dest, node.instance_info.get(src))
+
+ @classmethod
+ def get_by_node_id(cls, context, node_id):
+ """Get a NodeInventory object by its node ID.
+
+ :param cls: the :class:`NodeInventory`
+ :param context: Security context
+        :param node_id: The ID of the node.
+ :returns: A :class:`NodeInventory` object.
+ :raises: NodeInventoryNotFound
+
+ """
+ db_inventory = cls.dbapi.get_node_inventory_by_node_id(node_id)
+ inventory = cls._from_db_object(context, cls(), db_inventory)
+ return inventory
+
+ def create(self, context=None):
+ """Create a NodeInventory record in the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+                       object, e.g.: NodeInventory(context)
+ """
+ values = self.do_version_changes_for_db()
+ db_inventory = self.dbapi.create_node_inventory(values)
+ self._from_db_object(self._context, self, db_inventory)
+
+ def destroy(self, context=None):
+ """Delete the NodeInventory from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: NodeInventory(context)
+ :raises: NodeInventoryNotFound
+ """
+ self.dbapi.destroy_node_inventory_by_node_id(self.node_id)
+ self.obj_reset_changes()
diff --git a/ironic/objects/port.py b/ironic/objects/port.py
index b4d0e78eb..8f6f7ddf0 100644
--- a/ironic/objects/port.py
+++ b/ironic/objects/port.py
@@ -44,7 +44,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# change)
# Version 1.9: Add support for Smart NIC port
# Version 1.10: Add name field
- VERSION = '1.10'
+ # Version 1.11: Add node_uuid field
+ VERSION = '1.11'
dbapi = dbapi.get_instance()
@@ -52,6 +53,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
'id': object_fields.IntegerField(),
'uuid': object_fields.UUIDField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
+ 'node_uuid': object_fields.UUIDField(nullable=True),
'address': object_fields.MACAddressField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
'local_link_connection': object_fields.FlexibleDictField(
@@ -297,6 +299,27 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
project=project)
return cls._from_db_object_list(context, db_ports)
+ @classmethod
+ def list_by_node_shards(cls, context, shards, limit=None, marker=None,
+ sort_key=None, sort_dir=None, project=None):
+ """Return a list of Port objects associated with nodes in shards
+
+ :param context: Security context.
+ :param shards: a list of shards
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :param project: a node owner or lessee to match against
+        :returns: a list of :class:`Port` objects.
+
+ """
+ db_ports = cls.dbapi.get_ports_by_shards(shards, limit=limit,
+ marker=marker,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+ return cls._from_db_object_list(context, db_ports)
+
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
@@ -377,6 +400,10 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
"""
values = self.do_version_changes_for_db()
db_port = self.dbapi.create_port(values)
+ # NOTE(hjensas): To avoid lazy load issue (DetachedInstanceError) in
+        # sqlalchemy, get the new port from the DB to ensure the node_uuid
+ # via association_proxy relationship is loaded.
+ db_port = self.dbapi.get_port_by_id(db_port['id'])
self._from_db_object(self._context, self, db_port)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/objects/portgroup.py b/ironic/objects/portgroup.py
index 8628df731..ef21a5f90 100644
--- a/ironic/objects/portgroup.py
+++ b/ironic/objects/portgroup.py
@@ -36,7 +36,8 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.4: Migrate/copy extra['vif_port_id'] to
# internal_info['tenant_vif_port_id'] (not an explicit db
# change)
- VERSION = '1.4'
+ # Version 1.5: Add node_uuid field
+ VERSION = '1.5'
dbapi = dbapi.get_instance()
@@ -45,6 +46,7 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
'uuid': object_fields.UUIDField(nullable=True),
'name': object_fields.StringField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
+ 'node_uuid': object_fields.UUIDField(nullable=True),
'address': object_fields.MACAddressField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
'internal_info': object_fields.FlexibleDictField(nullable=True),
@@ -261,6 +263,10 @@ class Portgroup(base.IronicObject, object_base.VersionedObjectDictCompat):
"""
values = self.do_version_changes_for_db()
db_portgroup = self.dbapi.create_portgroup(values)
+ # NOTE(hjensas): To avoid lazy load issue (DetachedInstanceError) in
+        # sqlalchemy, get the new portgroup from the DB to ensure the node_uuid
+ # via association_proxy relationship is loaded.
+ db_portgroup = self.dbapi.get_portgroup_by_id(db_portgroup['id'])
self._from_db_object(self._context, self, db_portgroup)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
diff --git a/ironic/tests/base.py b/ironic/tests/base.py
index 4b34ef0a4..348f15c20 100644
--- a/ironic/tests/base.py
+++ b/ironic/tests/base.py
@@ -27,6 +27,7 @@ import subprocess
import sys
import tempfile
from unittest import mock
+import warnings
import eventlet
eventlet.monkey_patch(os=False)
@@ -38,6 +39,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslotest import base as oslo_test_base
+from sqlalchemy import exc as sqla_exc
from ironic.common import config as ironic_config
from ironic.common import context as ironic_context
@@ -70,6 +72,84 @@ def _patch_mock_callable(obj):
return False
+class WarningsFixture(fixtures.Fixture):
+ """Filters out warnings during test runs."""
+
+ def setUp(self):
+ super().setUp()
+
+ self._original_warning_filters = warnings.filters[:]
+
+ # NOTE(sdague): Make deprecation warnings only happen once. Otherwise
+ # this gets kind of crazy given the way that upstream python libs use
+ # this.
+ warnings.simplefilter('once', DeprecationWarning)
+
+ # NOTE(stephenfin): We get way too many of these. Silence them.
+ warnings.filterwarnings(
+ 'ignore',
+ message=(
+ 'Policy enforcement is depending on the value of .*. '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
+
+ # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
+ warnings.filterwarnings(
+ 'ignore',
+ message='Policy .* failed scope check',
+ category=UserWarning,
+ )
+
+ # Enable deprecation warnings to capture upcoming SQLAlchemy changes
+
+ warnings.filterwarnings(
+ 'ignore',
+ category=sqla_exc.SADeprecationWarning,
+ )
+
+ warnings.filterwarnings(
+ 'error',
+ module='ironic',
+ category=sqla_exc.SADeprecationWarning,
+ )
+
+ # Enable general SQLAlchemy warnings also to ensure we're not doing
+ # silly stuff. It's possible that we'll need to filter things out here
+ # with future SQLAlchemy versions, but that's a good thing
+
+ warnings.filterwarnings(
+ 'error',
+ module='ironic',
+ category=sqla_exc.SAWarning,
+ )
+
+ # ...but filter everything out until we get around to fixing them
+ # TODO(stephenfin): Fix all of these
+
+ warnings.filterwarnings(
+ 'ignore',
+ module='ironic',
+ message='SELECT statement has a cartesian product ',
+ category=sqla_exc.SAWarning,
+ )
+
+ # FIXME(stephenfin): We can remove this once oslo.db is fixed
+ # https://review.opendev.org/c/openstack/oslo.db/+/856453
+ warnings.filterwarnings(
+ 'ignore',
+ module='ironic',
+ message='TypeDecorator .* will not produce a cache key',
+ category=sqla_exc.SAWarning,
+ )
+
+ self.addCleanup(self._reset_warning_filters)
+
+ def _reset_warning_filters(self):
+ warnings.filters[:] = self._original_warning_filters
+
+
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
@@ -113,6 +193,7 @@ class TestCase(oslo_test_base.BaseTestCase):
self.addCleanup(hash_ring.HashRingManager().reset)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
+ self.useFixture(WarningsFixture())
driver_factory.HardwareTypesFactory._extension_manager = None
for factory in driver_factory._INTERFACE_LOADERS.values():
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index d7a3d474e..d56652b1e 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -21,6 +21,7 @@ import sys
import tempfile
from unittest import mock
from urllib import parse as urlparse
+import uuid
import fixtures
from oslo_config import cfg
@@ -43,6 +44,8 @@ from ironic.common import indicator_states
from ironic.common import policy
from ironic.common import states
from ironic.conductor import rpcapi
+from ironic.drivers.modules import inspect_utils
+from ironic.drivers.modules import inspector
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic import tests as tests_root
@@ -51,6 +54,7 @@ from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as test_api_utils
from ironic.tests.unit.objects import utils as obj_utils
+CONF = inspector.CONF
with open(
os.path.join(
@@ -4898,13 +4902,39 @@ class TestPost(test_api_base.BaseApiTest):
ndict = test_api_utils.post_get_test_node(owner='cowsay')
response = self.post_json('/nodes', ndict,
headers={api_base.Version.string:
- str(api_v1.max_version())})
+ str(api_v1.max_version()),
+ 'X-Project-Id': 'cowsay'})
self.assertEqual(http_client.CREATED, response.status_int)
result = self.get_json('/nodes/%s' % ndict['uuid'],
headers={api_base.Version.string:
str(api_v1.max_version())})
self.assertEqual('cowsay', result['owner'])
+ def test_create_node_owner_system_scope(self):
+ ndict = test_api_utils.post_get_test_node(owner='catsay')
+ response = self.post_json('/nodes', ndict,
+ headers={api_base.Version.string:
+ str(api_v1.max_version()),
+ 'OpenStack-System-Scope': 'all',
+ 'X-Roles': 'admin'})
+ self.assertEqual(http_client.CREATED, response.status_int)
+ result = self.get_json('/nodes/%s' % ndict['uuid'],
+ headers={api_base.Version.string:
+ str(api_v1.max_version())})
+ self.assertEqual('catsay', result['owner'])
+
+ def test_create_node_owner_recorded_project_scope(self):
+ ndict = test_api_utils.post_get_test_node()
+ response = self.post_json('/nodes', ndict,
+ headers={api_base.Version.string:
+ str(api_v1.max_version()),
+ 'X-Project-Id': 'ravensay'})
+ self.assertEqual(http_client.CREATED, response.status_int)
+ result = self.get_json('/nodes/%s' % ndict['uuid'],
+ headers={api_base.Version.string:
+ str(api_v1.max_version())})
+ self.assertEqual('ravensay', result['owner'])
+
def test_create_node_owner_old_api_version(self):
headers = {api_base.Version.string: '1.32'}
ndict = test_api_utils.post_get_test_node(owner='bob')
@@ -7886,3 +7916,222 @@ class TestNodeHistory(test_api_base.BaseApiTest):
self.assertIn('nodes/%s/history' % self.node.uuid, ret['next'])
self.assertIn('limit=1', ret['next'])
self.assertIn('marker=%s' % result_uuid, ret['next'])
+
+
+class TestNodeInventory(test_api_base.BaseApiTest):
+ fake_inventory_data = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+
+ def setUp(self):
+ super(TestNodeInventory, self).setUp()
+ self.version = "1.81"
+ self.node = obj_utils.create_test_node(
+ self.context,
+ provision_state=states.AVAILABLE, name='node-81')
+ self.node.save()
+ self.node.obj_reset_changes()
+
+ def _add_inventory(self):
+ self.inventory = objects.NodeInventory(
+ node_id=self.node.id, inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
+ self.inventory.create()
+
+ def test_get_old_version(self):
+ ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
+ headers={api_base.Version.string: "1.80"},
+ expect_errors=True)
+ self.assertEqual(http_client.NOT_FOUND, ret.status_code)
+
+ def test_get_inventory_no_inventory(self):
+ ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
+ headers={api_base.Version.string: self.version},
+ expect_errors=True)
+ self.assertEqual(http_client.NOT_FOUND, ret.status_code)
+
+ def test_get_inventory(self):
+ self._add_inventory()
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
+ headers={api_base.Version.string: self.version})
+ self.assertEqual({'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}, ret)
+
+ @mock.patch.object(inspect_utils, 'get_introspection_data',
+ autospec=True)
+ def test_get_inventory_exception(self, mock_get_data):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ mock_get_data.side_effect = [
+ exception.NodeInventoryNotFound]
+ ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
+ headers={api_base.Version.string: self.version},
+ expect_errors=True)
+ self.assertEqual(http_client.NOT_FOUND, ret.status_int)
+
+ @mock.patch.object(inspect_utils, '_get_introspection_data_from_swift',
+ autospec=True)
+ def test_get_inventory_swift(self, mock_get_data):
+ CONF.set_override('data_backend', 'swift',
+ group='inventory')
+ mock_get_data.return_value = {"inventory": self.fake_inventory_data,
+ "plugin_data": self.fake_plugin_data}
+ ret = self.get_json('/nodes/%s/inventory' % self.node.uuid,
+ headers={api_base.Version.string: self.version})
+ self.assertEqual({'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}, ret)
+
+
+class TestNodeShardGets(test_api_base.BaseApiTest):
+ def setUp(self):
+ super(TestNodeShardGets, self).setUp()
+ p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for',
+ autospec=True)
+ self.mock_gtf = p.start()
+ self.mock_gtf.return_value = 'test-topic'
+ self.addCleanup(p.stop)
+ self.mock_get_conductor_for = self.useFixture(
+ fixtures.MockPatchObject(rpcapi.ConductorAPI, 'get_conductor_for',
+ autospec=True)).mock
+ self.mock_get_conductor_for.return_value = 'fake.conductor'
+ self.node = obj_utils.create_test_node(self.context, shard='foo')
+ self.headers = {api_base.Version.string: '1.82'}
+
+ def test_get_node_shard_field(self):
+ result = self.get_json(
+ '/nodes/%s' % self.node.uuid, headers=self.headers)
+ self.assertEqual('foo', result['shard'])
+
+ def test_get_node_shard_field_fails_wrong_version(self):
+ headers = {api_base.Version.string: '1.80'}
+ result = self.get_json('/nodes/%s' % self.node.uuid, headers=headers)
+ self.assertNotIn('shard', result)
+
+ def test_filtering_by_shard(self):
+ result = self.get_json(
+ '/nodes?shard=foo', fields='shard', headers=self.headers)
+ self.assertEqual(1, len(result['nodes']))
+ self.assertEqual('foo', result['nodes'][0]['shard'])
+
+ def test_filtering_by_shard_fails_wrong_version(self):
+ headers = {api_base.Version.string: '1.80'}
+
+ result = self.get_json('/nodes?shard=foo',
+ expect_errors=True, headers=headers)
+ self.assertEqual(http_client.NOT_ACCEPTABLE, result.status_code)
+
+ def test_filtering_by_single_shard_detail(self):
+ result = self.get_json('/nodes/detail?shard=foo', headers=self.headers)
+ self.assertEqual(1, len(result['nodes']))
+ self.assertEqual('foo', result['nodes'][0]['shard'])
+
+ def test_filtering_by_multi_shard_detail(self):
+ obj_utils.create_test_node(
+ self.context, uuid=uuid.uuid4(), shard='bar')
+ result = self.get_json(
+ '/nodes?shard=foo,bar', headers=self.headers)
+ self.assertEqual(2, len(result['nodes']))
+
+ def test_filtering_by_shard_detail_fails_wrong_version(self):
+ headers = {api_base.Version.string: '1.80'}
+
+ result = self.get_json('/nodes/detail?shard=foo',
+ expect_errors=True, headers=headers)
+ self.assertEqual(http_client.NOT_ACCEPTABLE, result.status_code)
+
+ def test_filtering_by_sharded(self):
+ obj_utils.create_test_node(self.context, uuid=uuid.uuid4())
+ obj_utils.create_test_node(self.context, uuid=uuid.uuid4())
+ # We now have one node in shard foo (setUp) and two unsharded.
+ result_true = self.get_json(
+ '/nodes?sharded=true', headers=self.headers)
+ result_false = self.get_json(
+ '/nodes?sharded=false', headers=self.headers)
+ self.assertEqual(1, len(result_true['nodes']))
+ self.assertEqual(2, len(result_false['nodes']))
+
+
+@mock.patch.object(rpcapi.ConductorAPI, 'create_node',
+ lambda _api, _ctx, node, _topic: _create_node_locally(node))
+class TestNodeShardPost(test_api_base.BaseApiTest):
+ def setUp(self):
+ super(TestNodeShardPost, self).setUp()
+ p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for',
+ autospec=True)
+ self.mock_gtf = p.start()
+ self.mock_gtf.return_value = 'test-topic'
+ self.addCleanup(p.stop)
+ self.chassis = obj_utils.create_test_chassis(self.context)
+
+ def test_create_node_with_shard(self):
+ shard = 'foo'
+ ndict = test_api_utils.post_get_test_node(shard=shard)
+ headers = {api_base.Version.string: '1.82'}
+ response = self.post_json('/nodes', ndict, headers=headers)
+ self.assertEqual(http_client.CREATED, response.status_int)
+
+ result = self.get_json('/nodes/%s' % ndict['uuid'], headers=headers)
+ self.assertEqual(ndict['uuid'], result['uuid'])
+ self.assertEqual(shard, result['shard'])
+
+ def test_create_node_with_shard_fail_wrong_version(self):
+ headers = {api_base.Version.string: '1.80'}
+ shard = 'foo'
+ ndict = test_api_utils.post_get_test_node(shard=shard)
+ response = self.post_json(
+ '/nodes', ndict, expect_errors=True, headers=headers)
+ self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int)
+
+
+class TestNodeShardPatch(test_api_base.BaseApiTest):
+ def setUp(self):
+ super(TestNodeShardPatch, self).setUp()
+ self.node = obj_utils.create_test_node(self.context, name='node-57.1')
+ p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for',
+ autospec=True)
+ self.mock_gtf = p.start()
+ self.mock_gtf.return_value = 'test-topic'
+ self.addCleanup(p.stop)
+ p = mock.patch.object(rpcapi.ConductorAPI, 'update_node',
+ autospec=True)
+ self.mock_update_node = p.start()
+ self.addCleanup(p.stop)
+
+ def test_node_add_shard(self):
+ self.mock_update_node.return_value = self.node
+ (self
+ .mock_update_node
+ .return_value
+ .updated_at) = "2013-12-03T06:20:41.184720+00:00"
+ headers = {api_base.Version.string: '1.82'}
+ shard = 'shard1'
+ body = [{
+ 'path': '/shard',
+ 'value': shard,
+ 'op': 'add',
+ }]
+
+ response = self.patch_json(
+ '/nodes/%s' % self.node.uuid, body, headers=headers)
+ self.assertEqual(http_client.OK, response.status_code)
+ self.mock_update_node.assert_called_once()
+
+ def test_node_add_shard_fail_wrong_version(self):
+ self.mock_update_node.return_value = self.node
+ (self
+ .mock_update_node
+ .return_value
+ .updated_at) = "2013-12-03T06:20:41.184720+00:00"
+ headers = {api_base.Version.string: '1.80'}
+ shard = 'shard1'
+ body = [{
+ 'path': '/shard',
+ 'value': shard,
+ 'op': 'add',
+ }]
+
+ response = self.patch_json('/nodes/%s' % self.node.uuid,
+ body, expect_errors=True, headers=headers)
+ self.mock_update_node.assert_not_called()
+ self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 6823c3b51..31885f45f 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -15,7 +15,6 @@ Tests for the API /ports/ methods.
import datetime
from http import client as http_client
-import types
from unittest import mock
from urllib import parse as urlparse
@@ -194,7 +193,7 @@ class TestPortsController__GetPortsCollection(base.TestCase):
mock_request.context = 'fake-context'
mock_list.return_value = []
self.controller._get_ports_collection(None, None, None, None, None,
- None, 'asc',
+ None, None, 'asc',
resource_url='ports')
mock_list.assert_called_once_with('fake-context', 1000, None,
project=None, sort_dir='asc',
@@ -236,53 +235,6 @@ class TestListPorts(test_api_base.BaseApiTest):
# never expose the node_id
self.assertNotIn('node_id', data['ports'][0])
- # NOTE(jlvillal): autospec=True doesn't work on staticmethods:
- # https://bugs.python.org/issue23078
- @mock.patch.object(objects.Node, 'get_by_id', spec_set=types.FunctionType)
- def test_list_with_deleted_node(self, mock_get_node):
- # check that we don't end up with HTTP 400 when node deletion races
- # with listing ports - see https://launchpad.net/bugs/1748893
- obj_utils.create_test_port(self.context, node_id=self.node.id)
- mock_get_node.side_effect = exception.NodeNotFound('boom')
- data = self.get_json('/ports')
- self.assertEqual([], data['ports'])
-
- # NOTE(jlvillal): autospec=True doesn't work on staticmethods:
- # https://bugs.python.org/issue23078
- @mock.patch.object(objects.Node, 'get_by_id',
- spec_set=types.FunctionType)
- def test_list_detailed_with_deleted_node(self, mock_get_node):
- # check that we don't end up with HTTP 400 when node deletion races
- # with listing ports - see https://launchpad.net/bugs/1748893
- port = obj_utils.create_test_port(self.context, node_id=self.node.id)
- port2 = obj_utils.create_test_port(self.context, node_id=self.node.id,
- uuid=uuidutils.generate_uuid(),
- address='66:44:55:33:11:22')
- mock_get_node.side_effect = [exception.NodeNotFound('boom'), self.node]
- data = self.get_json('/ports/detail')
- # The "correct" port is still returned
- self.assertEqual(1, len(data['ports']))
- self.assertIn(data['ports'][0]['uuid'], {port.uuid, port2.uuid})
- self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid'])
-
- # NOTE(jlvillal): autospec=True doesn't work on staticmethods:
- # https://bugs.python.org/issue23078
- @mock.patch.object(objects.Portgroup, 'get', spec_set=types.FunctionType)
- def test_list_with_deleted_port_group(self, mock_get_pg):
- # check that we don't end up with HTTP 400 when port group deletion
- # races with listing ports - see https://launchpad.net/bugs/1748893
- portgroup = obj_utils.create_test_portgroup(self.context,
- node_id=self.node.id)
- port = obj_utils.create_test_port(self.context, node_id=self.node.id,
- portgroup_id=portgroup.id)
- mock_get_pg.side_effect = exception.PortgroupNotFound('boom')
- data = self.get_json(
- '/ports/detail',
- headers={api_base.Version.string: str(api_v1.max_version())}
- )
- self.assertEqual(port.uuid, data['ports'][0]["uuid"])
- self.assertIsNone(data['ports'][0]["portgroup_uuid"])
-
@mock.patch.object(policy, 'authorize', spec=True)
def test_list_non_admin_forbidden(self, mock_authorize):
def mock_authorize_function(rule, target, creds):
@@ -1129,6 +1081,44 @@ class TestListPorts(test_api_base.BaseApiTest):
response.json['error_message'])
+class TestListPortsByShard(test_api_base.BaseApiTest):
+ def setUp(self):
+ super(TestListPortsByShard, self).setUp()
+ self.headers = {
+ api_base.Version.string: '1.%s' % versions.MINOR_82_NODE_SHARD
+ }
+
+ def _create_port_with_shard(self, shard, address):
+ node = obj_utils.create_test_node(self.context, owner='12345',
+ shard=shard,
+ uuid=uuidutils.generate_uuid())
+ return obj_utils.create_test_port(self.context, name='port_%s' % shard,
+ node_id=node.id, address=address,
+ uuid=uuidutils.generate_uuid())
+
+ def test_get_by_shard_single_fail_api_version(self):
+ self._create_port_with_shard('test_shard', 'aa:bb:cc:dd:ee:ff')
+ data = self.get_json('/ports?shard=test_shard', expect_errors=True)
+ self.assertEqual(406, data.status_int)
+
+ def test_get_by_shard_single(self):
+ port = self._create_port_with_shard('test_shard', 'aa:bb:cc:dd:ee:ff')
+ data = self.get_json('/ports?shard=test_shard', headers=self.headers)
+ self.assertEqual(port.uuid, data['ports'][0]["uuid"])
+
+ def test_get_by_shard_multi(self):
+ bad_shard_address = 'ee:ee:ee:ee:ee:ee'
+ self._create_port_with_shard('shard1', 'aa:bb:cc:dd:ee:ff')
+ self._create_port_with_shard('shard2', 'ab:bb:cc:dd:ee:ff')
+ self._create_port_with_shard('shard3', bad_shard_address)
+
+ res = self.get_json('/ports?shard=shard1,shard2', headers=self.headers)
+ self.assertEqual(2, len(res['ports']))
+        # both returned ports must come from shard1/shard2, never shard3
+ self.assertNotEqual(res['ports'][0]['address'], bad_shard_address)
+ self.assertNotEqual(res['ports'][1]['address'], bad_shard_address)
+
+
@mock.patch.object(rpcapi.ConductorAPI, 'update_port', autospec=True,
side_effect=_rpcapi_update_port)
class TestPatch(test_api_base.BaseApiTest):
diff --git a/ironic/tests/unit/api/controllers/v1/test_root.py b/ironic/tests/unit/api/controllers/v1/test_root.py
index 78d3053e4..229e88622 100644
--- a/ironic/tests/unit/api/controllers/v1/test_root.py
+++ b/ironic/tests/unit/api/controllers/v1/test_root.py
@@ -147,6 +147,10 @@ class TestV1Routing(api_base.BaseApiTest):
{'href': 'http://localhost/v1/ports/', 'rel': 'self'},
{'href': 'http://localhost/ports/', 'rel': 'bookmark'}
],
+ 'shards': [
+ {'href': 'http://localhost/v1/shards/', 'rel': 'self'},
+ {'href': 'http://localhost/shards/', 'rel': 'bookmark'}
+ ],
'volume': [
{'href': 'http://localhost/v1/volume/', 'rel': 'self'},
{'href': 'http://localhost/volume/', 'rel': 'bookmark'}
diff --git a/ironic/tests/unit/api/controllers/v1/test_shard.py b/ironic/tests/unit/api/controllers/v1/test_shard.py
new file mode 100644
index 000000000..73d30f106
--- /dev/null
+++ b/ironic/tests/unit/api/controllers/v1/test_shard.py
@@ -0,0 +1,80 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for the API /shards/ methods.
+"""
+
+from http import client as http_client
+import uuid
+
+from ironic.api.controllers import base as api_base
+from ironic.api.controllers import v1 as api_v1
+from ironic.tests.unit.api import base as test_api_base
+from ironic.tests.unit.objects import utils as obj_utils
+
+
+class TestListShards(test_api_base.BaseApiTest):
+ headers = {api_base.Version.string: str(api_v1.max_version())}
+
+ def _create_test_shard(self, name, count):
+ for i in range(count):
+ obj_utils.create_test_node(
+ self.context, uuid=uuid.uuid4(), shard=name)
+
+ def test_empty(self):
+ data = self.get_json('/shards', headers=self.headers)
+ self.assertEqual([], data['shards'])
+
+ def test_one_shard(self):
+ shard = 'shard1'
+ count = 1
+ self._create_test_shard(shard, count)
+ data = self.get_json('/shards', headers=self.headers)
+ self.assertEqual(shard, data['shards'][0]['name'])
+ self.assertEqual(count, data['shards'][0]['count'])
+
+ def test_multiple_shards(self):
+ for i in range(0, 6):
+ self._create_test_shard('shard{}'.format(i), i)
+ data = self.get_json('/shards', headers=self.headers)
+ self.assertEqual(5, len(data['shards']))
+
+ def test_nodes_but_no_shards(self):
+ self._create_test_shard(None, 5)
+ data = self.get_json('/shards', headers=self.headers)
+ self.assertEqual("None", data['shards'][0]['name'])
+ self.assertEqual(5, data['shards'][0]['count'])
+
+ def test_fail_wrong_version(self):
+ headers = {api_base.Version.string: '1.80'}
+ self._create_test_shard('shard1', 1)
+ result = self.get_json(
+ '/shards', expect_errors=True, headers=headers)
+ self.assertEqual(http_client.NOT_FOUND, result.status_int)
+
+ def test_fail_get_one(self):
+ # We do not implement a get /v1/shards/<shard> endpoint
+ # validate it errors properly
+ self._create_test_shard('shard1', 1)
+ result = self.get_json(
+ '/shards/shard1', expect_errors=True, headers=self.headers)
+ self.assertEqual(http_client.NOT_FOUND, result.status_int)
+
+ def test_fail_post(self):
+ result = self.post_json(
+ '/shards', {}, expect_errors=True, headers=self.headers)
+ self.assertEqual(http_client.METHOD_NOT_ALLOWED, result.status_int)
+
+ def test_fail_put(self):
+ result = self.put_json(
+ '/shards', {}, expect_errors=True, headers=self.headers)
+ self.assertEqual(http_client.METHOD_NOT_ALLOWED, result.status_int)
diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py
index 5793e95a8..f5cbe498d 100644
--- a/ironic/tests/unit/api/test_acl.py
+++ b/ironic/tests/unit/api/test_acl.py
@@ -81,10 +81,18 @@ class TestACLBase(base.BaseApiTest):
body=None, assert_status=None,
assert_dict_contains=None,
assert_list_length=None,
- deprecated=None):
+ deprecated=None,
+ self_manage_nodes=True):
path = path.format(**self.format_data)
self.mock_auth.side_effect = self._fake_process_request
+ # Set self management override
+ if not self_manage_nodes:
+ cfg.CONF.set_override(
+ 'project_admin_can_manage_own_nodes',
+ False,
+ 'api')
+
# always request the latest api version
version = api_versions.max_version_string()
rheaders = {
@@ -278,6 +286,8 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
db_utils.create_test_node_trait(
node_id=fake_db_node['id'])
fake_history = db_utils.create_test_history(node_id=fake_db_node.id)
+ fake_inventory = db_utils.create_test_inventory(
+ node_id=fake_db_node.id)
# dedicated node for portgroup addition test to avoid
# false positives with test runners.
db_utils.create_test_node(
@@ -301,6 +311,7 @@ class TestRBACModelBeforeScopesBase(TestACLBase):
'volume_target_ident': fake_db_volume_target['uuid'],
'volume_connector_ident': fake_db_volume_connector['uuid'],
'history_ident': fake_history['uuid'],
+ 'node_inventory': fake_inventory,
})
@@ -407,6 +418,8 @@ class TestRBACProjectScoped(TestACLBase):
resource_class="CUSTOM_TEST")
owned_node_history = db_utils.create_test_history(
node_id=owned_node.id)
+ owned_node_inventory = db_utils.create_test_inventory(
+ node_id=owned_node.id)
# Leased nodes
leased_node = db_utils.create_test_node(
@@ -437,6 +450,8 @@ class TestRBACProjectScoped(TestACLBase):
leased_node_history = db_utils.create_test_history(
node_id=leased_node.id)
+ leased_node_inventory = db_utils.create_test_inventory(
+ node_id=leased_node.id)
# Random objects that shouldn't be project visible
other_node = db_utils.create_test_node(
@@ -472,7 +487,9 @@ class TestRBACProjectScoped(TestACLBase):
'owner_allocation': fake_owner_allocation['uuid'],
'lessee_allocation': fake_leased_allocation['uuid'],
'owned_history_ident': owned_node_history['uuid'],
- 'lessee_history_ident': leased_node_history['uuid']})
+ 'lessee_history_ident': leased_node_history['uuid'],
+ 'owned_inventory': owned_node_inventory,
+ 'leased_inventory': leased_node_inventory})
@ddt.file_data('test_rbac_project_scoped.yaml')
@ddt.unpack
diff --git a/ironic/tests/unit/api/test_rbac_project_scoped.yaml b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
index 802600703..0e2107658 100644
--- a/ironic/tests/unit/api/test_rbac_project_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_project_scoped.yaml
@@ -74,6 +74,14 @@ values:
X-Auth-Token: 'third-party-admin-token'
X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
X-Roles: admin,manager,member,reader
+ service_headers: &service_headers
+ X-Auth-Token: 'service-token'
+ X-Project-Id: ae64129e-b188-4662-b014-4127f4366ee6
+ X-Roles: service
+ service_headers_owner_project: &service_headers_owner_project
+ X-Auth-Token: 'service-token'
+ X-Project-Id: 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
+ X-Roles: service
owner_project_id: &owner_project_id 70e5e25a-2ca2-4cb1-8ae8-7d8739cee205
lessee_project_id: &lessee_project_id f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
owned_node_ident: &owned_node_ident f11853c7-fa9c-4db3-a477-c9d8e0dbbf13
@@ -89,35 +97,87 @@ owner_admin_cannot_post_nodes:
body: &node_post_body
name: node
driver: fake-driverz
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+owner_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *owner_admin_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
+
+service_nodes_cannot_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: False
+
+service_nodes_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
owner_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *owner_manager_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
lessee_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *lessee_admin_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+lessee_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *lessee_admin_headers
+ body: *node_post_body
+ assert_status: 403
+  self_manage_nodes: True
lessee_manager_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *lessee_manager_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+lessee_manager_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *lessee_manager_headers
+ body: *node_post_body
+ assert_status: 403
+ self_manage_nodes: True
third_party_admin_cannot_post_nodes:
path: '/v1/nodes'
method: post
headers: *third_party_admin_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
+ self_manage_nodes: False
+
+third_party_admin_can_post_nodes:
+ path: '/v1/nodes'
+ method: post
+ headers: *third_party_admin_headers
+ body: *node_post_body
+ assert_status: 503
+ self_manage_nodes: True
# Based on nodes_post_member
owner_member_cannot_post_nodes:
@@ -125,7 +185,7 @@ owner_member_cannot_post_nodes:
method: post
headers: *owner_member_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
# Based on nodes_post_reader
owner_reader_cannot_post_reader:
@@ -133,7 +193,7 @@ owner_reader_cannot_post_reader:
method: post
headers: *owner_reader_headers
body: *node_post_body
- assert_status: 500
+ assert_status: 403
# Based on nodes_get_admin
# TODO: Create 3 nodes, 2 owned, 1 leased where it is also owned.
@@ -671,6 +731,26 @@ owner_admin_cannot_delete_nodes:
method: delete
headers: *owner_admin_headers
assert_status: 403
+ self_manage_nodes: False
+
+owner_admin_can_delete_nodes:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *owner_admin_headers
+ assert_status: 503
+ self_manage_nodes: True
+
+service_cannot_delete_owner_admin_nodes:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
+service_can_delete_nodes_in_own_project:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
owner_manager_cannot_delete_nodes:
path: '/v1/nodes/{owner_node_ident}'
@@ -1262,7 +1342,6 @@ lessee_manager_can_change_provision_state:
body: *provision_body
assert_status: 503
-
lessee_member_cannot_change_provision_state:
path: '/v1/nodes/{lessee_node_ident}/states/provision'
method: put
@@ -1277,6 +1356,20 @@ third_party_admin_cannot_change_provision_state:
body: *provision_body
assert_status: 404
+service_can_change_provision_state_for_own_nodes:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers_owner_project
+ body: *provision_body
+ assert_status: 503
+
+service_cannot_change_provision_state:
+ path: '/v1/nodes/{owner_node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 404
+
# Raid configuration
owner_admin_can_set_raid_config:
@@ -1319,6 +1412,13 @@ owner_member_can_set_raid_config:
body: *raid_body
assert_status: 503
+owner_service_can_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers_owner_project
+ body: *raid_body
+ assert_status: 503
+
lessee_member_cannot_set_raid_config:
path: '/v1/nodes/{lessee_node_ident}/states/raid'
method: put
@@ -1333,6 +1433,14 @@ third_party_admin_cannot_set_raid_config:
body: *raid_body
assert_status: 404
+service_cannot_set_raid_config:
+ path: '/v1/nodes/{lessee_node_ident}/states/raid'
+ method: put
+ headers: *service_headers
+ body: *raid_body
+ assert_status: 404
+
+
# Console
owner_admin_can_get_console:
@@ -1347,6 +1455,12 @@ owner_manager_can_get_console:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_get_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_get_console:
path: '/v1/nodes/{lessee_node_ident}/states/console'
method: get
@@ -1432,6 +1546,20 @@ lessee_member_cannot_set_console:
body: *console_body_put
assert_status: 403
+owner_service_can_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers_owner_project
+ body: *console_body_put
+ assert_status: 503
+
+service_cannot_set_console:
+ path: '/v1/nodes/{owner_node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 404
+
# Vendor Passthru - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# owner/lessee vendor passthru methods inaccessible
@@ -1450,6 +1578,12 @@ owner_manager_cannot_get_vendor_passthru_methods:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru_methods:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru_methods:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru/methods'
method: get
@@ -1499,6 +1633,12 @@ owner_manager_cannot_get_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_get_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_get_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: get
@@ -1549,6 +1689,12 @@ owner_manager_cannot_post_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_post_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_post_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: post
@@ -1599,6 +1745,12 @@ owner_manager_cannot_put_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_put_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_put_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: put
@@ -1649,6 +1801,12 @@ owner_manager_cannot_delete_vendor_passthru:
headers: *owner_manager_headers
assert_status: 403
+owner_service_cannot_delete_vendor_passthru:
+ path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 403
+
owner_member_cannot_delete_vendor_passthru:
path: '/v1/nodes/{owner_node_ident}/vendor_passthru?method=test'
method: delete
@@ -1693,6 +1851,12 @@ owner_reader_get_traits:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_get_traits:
path: '/v1/nodes/{lessee_node_ident}/traits'
method: get
@@ -1722,6 +1886,13 @@ owner_manager_can_put_traits:
assert_status: 503
body: *traits_body
+owner_service_can_put_traits:
+ path: '/v1/nodes/{owner_node_ident}/traits'
+ method: put
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *traits_body
+
owner_member_cannot_put_traits:
path: '/v1/nodes/{owner_node_ident}/traits'
method: put
@@ -1757,6 +1928,13 @@ third_party_admin_cannot_put_traits:
assert_status: 404
body: *traits_body
+service_cannot_put_traits:
+ path: '/v1/nodes/{lessee_node_ident}/traits'
+ method: put
+ headers: *service_headers
+ assert_status: 404
+ body: *traits_body
+
owner_admin_can_delete_traits:
path: '/v1/nodes/{owner_node_ident}/traits/{trait}'
method: delete
@@ -1873,6 +2051,21 @@ owner_admin_can_post_vifs:
body: &vif_body
id: ee21d58f-5de2-4956-85ff-33935ea1ca00
+service_can_post_vifs_for_own_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 503
+ body: *vif_body
+
+service_cannot_post_vifs_for_other_project:
+ path: '/v1/nodes/{owner_node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ # NOTE(TheJulia): This is a 404 because the node should not be visible.
+ assert_status: 404
+ body: *vif_body
+
owner_manager_can_post_vifs:
path: '/v1/nodes/{owner_node_ident}/vifs'
method: post
@@ -1971,6 +2164,18 @@ third_party_admin_cannot_delete_vifs:
headers: *third_party_admin_headers
assert_status: 404
+service_can_delete_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
+service_cannot_delete_other_nodes_vifs:
+ path: '/v1/nodes/{owner_node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
owner_readers_can_get_indicators:
path: '/v1/nodes/{owner_node_ident}/management/indicators'
@@ -2034,6 +2239,14 @@ owner_reader_can_list_portgroups:
assert_list_length:
portgroups: 2
+owner_service_can_list_portgroups:
+ path: '/v1/portgroups'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ portgroups: 2
+
lessee_reader_can_list_portgroups:
path: '/v1/portgroups'
method: get
@@ -2078,6 +2291,13 @@ owner_admin_can_add_portgroup:
node_uuid: 1ab63b9e-66d7-4cd7-8618-dddd0f9f7881
assert_status: 201
+owner_service_can_add_portgroup:
+ path: '/v1/portgroups'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_portgroup_body
+ assert_status: 201
+
owner_manager_can_add_portgroup:
path: '/v1/portgroups'
method: post
@@ -2193,6 +2413,12 @@ owner_member_cannot_delete_portgroup:
headers: *owner_member_headers
assert_status: 403
+owner_service_can_delete_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_cannot_delete_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}'
method: delete
@@ -2217,6 +2443,12 @@ third_party_admin_cannot_delete_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_portgroup:
+ path: '/v1/portgroups/{lessee_portgroup_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Portgroups by node - https://docs.openstack.org/api-ref/baremetal/#listing-portgroups-by-node-nodes-portgroups
owner_reader_can_get_node_portgroups:
@@ -2237,6 +2469,13 @@ third_party_admin_cannot_get_portgroups:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_portgroups:
+ path: '/v1/nodes/{lessee_node_ident}/portgroups'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Ports - https://docs.openstack.org/api-ref/baremetal/#ports-ports
# Based on ports_* tests
@@ -2250,6 +2489,15 @@ owner_reader_can_list_ports:
assert_list_length:
ports: 3
+owner_service_can_list_ports:
+ path: '/v1/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ # Two ports owned, one on the leased node. 1 invisible.
+ assert_list_length:
+ ports: 3
+
lessee_reader_can_list_ports:
path: '/v1/ports'
method: get
@@ -2272,6 +2520,12 @@ owner_reader_can_read_port:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_read_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_read_port:
path: '/v1/ports/{lessee_port_ident}'
method: get
@@ -2318,6 +2572,13 @@ owner_manager_cannot_add_ports_to_other_nodes:
body: *other_node_add_port_body
assert_status: 403
+owner_service_cannot_add_ports_to_other_nodes:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *other_node_add_port_body
+ assert_status: 403
+
owner_member_cannot_add_port:
path: '/v1/ports'
method: post
@@ -2355,6 +2616,20 @@ third_party_admin_cannot_add_port:
body: *lessee_port_body
assert_status: 403
+service_can_add_port:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers_owner_project
+ body: *owner_port_body
+ assert_status: 503
+
+service_cannot_add_ports_to_other_project:
+ path: '/v1/ports'
+ method: post
+ headers: *service_headers
+ body: *owner_port_body
+ assert_status: 403
+
owner_admin_can_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2372,6 +2647,13 @@ owner_manager_can_modify_port:
body: *port_patch_body
assert_status: 503
+owner_service_can_modify_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: patch
+ headers: *service_headers_owner_project
+ body: *port_patch_body
+ assert_status: 503
+
owner_member_cannot_modify_port:
path: '/v1/ports/{owner_port_ident}'
method: patch
@@ -2419,6 +2701,12 @@ owner_manager_can_delete_port:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_port:
+ path: '/v1/ports/{owner_port_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
owner_member_cannot_delete_port:
path: '/v1/ports/{owner_port_ident}'
method: delete
@@ -2459,6 +2747,14 @@ owner_reader_can_get_node_ports:
assert_list_length:
ports: 2
+owner_service_can_get_node_ports:
+ path: '/v1/nodes/{owner_node_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ ports: 2
+
lessee_reader_can_get_node_port:
path: '/v1/nodes/{lessee_node_ident}/ports'
method: get
@@ -2473,6 +2769,12 @@ third_party_admin_cannot_get_ports:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports:
+ path: '/v1/nodes/{lessee_node_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Ports by portgroup - https://docs.openstack.org/api-ref/baremetal/#listing-ports-by-portgroup-portgroup-ports
# Based on portgroups_ports_get* tests
@@ -2483,6 +2785,12 @@ owner_reader_can_get_ports_by_portgroup:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_ports_by_portgroup:
+ path: '/v1/portgroups/{owner_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_ports_by_portgroup:
path: '/v1/portgroups/{lessee_portgroup_ident}/ports'
method: get
@@ -2495,6 +2803,13 @@ third_party_admin_cannot_get_ports_by_portgroup:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_ports_by_portgroup:
+ path: '/v1/portgroups/{other_portgroup_ident}/ports'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+
# Volume(s) - https://docs.openstack.org/api-ref/baremetal/#volume-volume
# TODO(TheJulia): volumes will likely need some level of exhaustive testing.
# i.e. ensure that the volume is permissible. However this may not be possible
@@ -2543,6 +2858,13 @@ owner_manager_can_post_volume_connector:
assert_status: 201
body: *volume_connector_body
+owner_service_can_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_connector_body
+
lessee_admin_cannot_post_volume_connector:
path: '/v1/volume/connectors'
method: post
@@ -2564,6 +2886,13 @@ third_party_admin_cannot_post_volume_connector:
assert_status: 403
body: *volume_connector_body
+service_cannot_post_volume_connector:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 403
+ body: *volume_connector_body
+
owner_reader_can_get_volume_connector:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -2654,6 +2983,12 @@ owner_manager_can_delete_volume_connectors:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_connectors:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_connectors:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -2672,6 +3007,12 @@ third_party_admin_cannot_delete_volume_connector:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_connector:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Volume targets
# TODO(TheJulia): Create at least 3 targets.
@@ -2732,6 +3073,13 @@ owner_admin_create_volume_target:
boot_index: 2
volume_id: 'test-id'
+owner_service_create_volume_target:
+ path: '/v1/volume/targets'
+ method: post
+ headers: *service_headers_owner_project
+ assert_status: 201
+ body: *volume_target_body
+
owner_manager_create_volume_target:
path: '/v1/volume/targets'
method: post
@@ -2782,6 +3130,13 @@ owner_member_can_patch_volume_target:
headers: *owner_member_headers
assert_status: 503
+owner_service_can_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_patch_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: patch
@@ -2810,6 +3165,13 @@ third_party_admin_cannot_patch_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_patch_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 404
+
owner_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2822,6 +3184,12 @@ owner_manager_can_delete_volume_target:
headers: *owner_manager_headers
assert_status: 503
+owner_service_can_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers_owner_project
+ assert_status: 503
+
lessee_admin_can_delete_volume_target:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -2852,6 +3220,12 @@ third_party_admin_cannot_delete_volume_target:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_delete_volume_target:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 404
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
owner_reader_can_get_volume_connectors:
@@ -2860,6 +3234,12 @@ owner_reader_can_get_volume_connectors:
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_get_volume_connectors:
+ path: '/v1/nodes/{owner_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_connectors:
path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
method: get
@@ -2872,12 +3252,24 @@ third_party_admin_cannot_get_node_volume_connectors:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_get_node_volume_connectors:
+ path: '/v1/nodes/{lessee_node_ident}/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
owner_reader_can_get_node_volume_targets:
path: '/v1/nodes/{owner_node_ident}/volume/targets'
method: get
headers: *owner_reader_headers
assert_status: 200
+owner_service_can_read_get_node_volume_targets:
+ path: '/v1/nodes/{owner_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
lessee_reader_can_get_node_volume_targets:
path: '/v1/nodes/{lessee_node_ident}/volume/targets'
method: get
@@ -2890,6 +3282,12 @@ third_part_admin_cannot_read_node_volume_targets:
headers: *third_party_admin_headers
assert_status: 404
+service_cannot_read_node_volume_targets:
+ path: '/v1/nodes/{lessee_node_ident}/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Drivers - https://docs.openstack.org/api-ref/baremetal/#drivers-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2912,6 +3310,12 @@ third_party_admin_cannot_get_drivers:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers:
+ path: '/v1/drivers'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Driver vendor passthru - https://docs.openstack.org/api-ref/baremetal/#driver-vendor-passthru-drivers
# This is a system scoped endpoint, everything should fail in this section.
@@ -2934,6 +3338,12 @@ third_party_admin_cannot_get_drivers_vendor_passthru:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_drivers_vendor_passthru:
+ path: '/v1/drivers/{driver_name}/vendor_passthru/methods'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
# Node Bios - https://docs.openstack.org/api-ref/baremetal/#node-bios-nodes
owner_reader_can_get_bios_setttings:
@@ -2954,6 +3364,18 @@ third_party_admin_cannot_get_bios_settings:
headers: *third_party_admin_headers
assert_status: 404
+service_can_get_bios_setttings_owner_project:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
+service_cannot_get_bios_setttings:
+ path: '/v1/nodes/{owner_node_ident}/bios'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
# Conductors - https://docs.openstack.org/api-ref/baremetal/#allocations-allocations
# This is a system scoped endpoint, everything should fail in this section.
@@ -3227,7 +3649,7 @@ third_party_admin_cannot_get_deploy_templates:
third_party_admin_cannot_post_deploy_template:
path: '/v1/deploy_templates'
method: post
- body:
+ body: &deploy_template
name: 'CUSTOM_TEST_TEMPLATE'
steps:
- interface: 'deploy'
@@ -3237,6 +3659,19 @@ third_party_admin_cannot_post_deploy_template:
headers: *third_party_admin_headers
assert_status: 500
+service_cannot_get_deploy_templates:
+ path: '/v1/deploy_templates'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_post_deploy_template:
+ path: '/v1/deploy_templates'
+ method: post
+ body: *deploy_template
+ headers: *service_headers
+ assert_status: 500
+
# Chassis endpoints - https://docs.openstack.org/api-ref/baremetal/#chassis-chassis
# This is a system scoped endpoint, everything should fail in this section.
@@ -3267,6 +3702,20 @@ third_party_admin_cannot_create_chassis:
description: 'test-chassis'
assert_status: 500
+service_cannot_access_chassis:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 500
+
+service_cannot_create_chassis:
+ path: '/v1/chassis'
+ method: post
+ headers: *service_headers
+ body:
+ description: 'test-chassis'
+ assert_status: 500
+
# Node history entries
node_history_get_admin:
@@ -3293,6 +3742,20 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
+node_history_get_service_cannot_be_retrieved:
+ path: '/v1/nodes/{owner_node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
node_history_get_entry_admin:
path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
method: get
@@ -3347,6 +3810,12 @@ lessee_node_history_get_entry_reader:
headers: *lessee_reader_headers
assert_status: 404
+owner_service_node_history_get_entry_reader:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers_owner_project
+ assert_status: 200
+
third_party_admin_cannot_get_node_history:
path: '/v1/nodes/{owner_node_ident}'
method: get
@@ -3358,3 +3827,64 @@ node_history_get_entry_admin:
method: get
headers: *third_party_admin_headers
assert_status: 404
+
+node_history_get_entry_service:
+ path: '/v1/nodes/{owner_node_ident}/history/{owned_history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 404
+
+# Node inventory support
+
+node_inventory_get_admin:
+ path: '/v1/nodes/{owner_node_ident}/inventory'
+ method: get
+ headers: *owner_admin_headers
+ assert_status: 200
+
+node_inventory_get_member:
+ path: '/v1/nodes/{owner_node_ident}/inventory'
+ method: get
+ headers: *owner_member_headers
+ assert_status: 200
+
+node_inventory_get_reader:
+ path: '/v1/nodes/{owner_node_ident}/inventory'
+ method: get
+ headers: *owner_reader_headers
+ assert_status: 200
+
+lessee_node_inventory_get_admin:
+ path: '/v1/nodes/{node_ident}/inventory'
+ method: get
+ headers: *lessee_admin_headers
+ assert_status: 404
+
+lessee_node_inventory_get_member:
+ path: '/v1/nodes/{node_ident}/inventory'
+ method: get
+ headers: *lessee_member_headers
+ assert_status: 404
+
+lessee_node_inventory_get_reader:
+ path: '/v1/nodes/{node_ident}/inventory'
+ method: get
+ headers: *lessee_reader_headers
+ assert_status: 404
+
+# Shard support - system scoped req'd to set on a node or view via /v1/shards
+shard_get_shards_disallowed:
+ path: '/v1/shards'
+ method: get
+ headers: *owner_reader_headers
+ assert_status: 403
+
+shard_patch_set_node_shard_disallowed:
+ path: '/v1/nodes/{owner_node_ident}'
+ method: patch
+ headers: *owner_admin_headers
+ body:
+ - op: replace
+ path: /shard
+ value: 'TestShard'
+ assert_status: 403
diff --git a/ironic/tests/unit/api/test_rbac_system_scoped.yaml b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
index d74a5fcae..16f0fded6 100644
--- a/ironic/tests/unit/api/test_rbac_system_scoped.yaml
+++ b/ironic/tests/unit/api/test_rbac_system_scoped.yaml
@@ -23,6 +23,10 @@ values:
X-Project-ID: a1111111111111111111111111111111
X-Roles: admin
X-Project-Name: 'other-project'
+ service_headers: &service_headers
+ X-Auth-Token: 'baremetal-service-token'
+ X-Roles: service
+ OpenStack-System-Scope: all
owner_project_id: &owner_project_id '{owner_project_id}'
other_project_id: &other_project_id '{other_project_id}'
node_ident: &node_ident '{node_ident}'
@@ -52,6 +56,13 @@ nodes_post_reader:
body: *node_post_body
assert_status: 403
+nodes_post_service:
+ path: '/v1/nodes'
+ method: post
+ headers: *service_headers
+ body: *node_post_body
+ assert_status: 503
+
nodes_get_node_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -92,6 +103,14 @@ nodes_get_admin:
nodes: 3
assert_status: 200
+nodes_get_service:
+ path: '/v1/nodes'
+ method: get
+ headers: *service_headers
+ assert_list_length:
+ nodes: 3
+ assert_status: 200
+
nodes_get_other_admin:
path: '/v1/nodes'
method: get
@@ -119,6 +138,12 @@ nodes_detail_get_reader:
headers: *reader_headers
assert_status: 200
+nodes_detail_get_service:
+ path: '/v1/nodes/detail'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
nodes_node_ident_get_admin:
path: '/v1/nodes/{node_ident}'
method: get
@@ -187,6 +212,12 @@ nodes_node_ident_delete_admin:
headers: *admin_headers
assert_status: 503
+nodes_node_ident_delete_service:
+ path: '/v1/nodes/{node_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
nodes_node_ident_delete_member:
path: '/v1/nodes/{node_ident}'
method: delete
@@ -337,7 +368,6 @@ nodes_management_inject_nmi_put_reader:
body: {}
assert_status: 403
-
nodes_states_get_admin:
path: '/v1/nodes/{node_ident}/states'
method: get
@@ -448,6 +478,13 @@ nodes_states_provision_put_reader:
body: *provision_body
assert_status: 403
+nodes_states_provision_put_service:
+ path: '/v1/nodes/{node_ident}/states/provision'
+ method: put
+ headers: *service_headers
+ body: *provision_body
+ assert_status: 503
+
nodes_states_raid_put_admin:
path: '/v1/nodes/{node_ident}/states/raid'
method: put
@@ -486,12 +523,18 @@ nodes_states_console_get_member:
headers: *scoped_member_headers
assert_status: 503
-nodes_states_console_get_admin:
+nodes_states_console_get_reader:
path: '/v1/nodes/{node_ident}/states/console'
method: get
headers: *reader_headers
assert_status: 403
+nodes_states_console_get_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_states_console_put_admin:
path: '/v1/nodes/{node_ident}/states/console'
method: put
@@ -514,6 +557,13 @@ nodes_states_console_put_reader:
body: *console_body_put
assert_status: 403
+nodes_states_console_put_service:
+ path: '/v1/nodes/{node_ident}/states/console'
+ method: put
+ headers: *service_headers
+ body: *console_body_put
+ assert_status: 503
+
# Node Traits - https://docs.openstack.org/api-ref/baremetal/?expanded=#node-vendor-passthru-nodes
# Calls conductor upon the get as a task is required.
@@ -729,6 +779,12 @@ nodes_vifs_get_reader:
headers: *reader_headers
assert_status: 503
+nodes_vifs_get_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: get
+ headers: *service_headers
+ assert_status: 503
+
nodes_vifs_post_admin:
path: '/v1/nodes/{node_ident}/vifs'
method: post
@@ -751,6 +807,13 @@ nodes_vifs_post_reader:
assert_status: 403
body: *vif_body
+nodes_vifs_post_service:
+ path: '/v1/nodes/{node_ident}/vifs'
+ method: post
+ headers: *service_headers
+ assert_status: 503
+ body: *vif_body
+
# This calls the conductor, hence not status 403.
nodes_vifs_node_vif_ident_delete_admin:
path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
@@ -770,6 +833,12 @@ nodes_vifs_node_vif_ident_delete_reader:
headers: *reader_headers
assert_status: 403
+nodes_vifs_node_vif_ident_delete_service:
+ path: '/v1/nodes/{node_ident}/vifs/{vif_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Indicators - https://docs.openstack.org/api-ref/baremetal/#indicators-management
nodes_management_indicators_get_allow:
@@ -1182,6 +1251,12 @@ volume_get_reader:
headers: *reader_headers
assert_status: 200
+volume_get_service:
+ path: '/v1/volume'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# Volume connectors
volume_connectors_get_admin:
@@ -1202,6 +1277,12 @@ volume_connectors_get_reader:
headers: *reader_headers
assert_status: 200
+volume_connectors_get_service:
+ path: '/v1/volume/connectors'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This ends up returning a 400 due to the
# UUID not already being in ironic.
volume_connectors_post_admin:
@@ -1230,6 +1311,13 @@ volume_connectors_post_reader:
assert_status: 403
body: *volume_connector_body
+volume_connectors_post_service:
+ path: '/v1/volume/connectors'
+ method: post
+ headers: *service_headers
+ assert_status: 201
+ body: *volume_connector_body
+
volume_volume_connector_id_get_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: get
@@ -1272,6 +1360,13 @@ volume_volume_connector_id_patch_reader:
body: *connector_patch_body
assert_status: 403
+volume_volume_connector_id_patch_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: patch
+ headers: *service_headers
+ body: *connector_patch_body
+ assert_status: 503
+
volume_volume_connector_id_delete_admin:
path: '/v1/volume/connectors/{volume_connector_ident}'
method: delete
@@ -1290,6 +1385,12 @@ volume_volume_connector_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_connector_id_delete_service:
+ path: '/v1/volume/connectors/{volume_connector_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Volume targets
volume_targets_get_admin:
@@ -1310,6 +1411,12 @@ volume_targets_get_reader:
headers: *reader_headers
assert_status: 200
+volume_targets_get_service:
+ path: '/v1/volume/targets'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): Because we can't seem to get the uuid
# to load from an existing uuid, since we're not subsituting
# it, this will return with 400 due to the ID not matching.
@@ -1360,6 +1467,12 @@ volume_volume_target_id_get_reader:
headers: *reader_headers
assert_status: 200
+volume_volume_target_id_get_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
# NOTE(TheJulia): This triggers a call to the conductor and
# thus will fail, but does not return a 403 which means success.
volume_volume_target_id_patch_admin:
@@ -1386,6 +1499,13 @@ volume_volume_target_id_patch_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_patch_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: patch
+ body: *volume_target_patch
+ headers: *service_headers
+ assert_status: 503
+
volume_volume_target_id_delete_admin:
path: '/v1/volume/targets/{volume_target_ident}'
method: delete
@@ -1404,6 +1524,12 @@ volume_volume_target_id_delete_reader:
headers: *reader_headers
assert_status: 403
+volume_volume_target_id_delete_service:
+ path: '/v1/volume/targets/{volume_target_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 503
+
# Get Volumes by Node - https://docs.openstack.org/api-ref/baremetal/#listing-volume-resources-by-node-nodes-volume
nodes_volume_get_admin:
@@ -2002,6 +2128,12 @@ chassis_get_reader:
headers: *reader_headers
assert_status: 200
+chassis_get_service:
+ path: '/v1/chassis'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
chassis_detail_get_admin:
path: '/v1/chassis/detail'
method: get
@@ -2080,6 +2212,12 @@ chassis_chassis_id_delete_reader:
headers: *reader_headers
assert_status: 403
+chassis_chassis_id_delete_service:
+ path: '/v1/chassis/{chassis_ident}'
+ method: delete
+ headers: *service_headers
+ assert_status: 403
+
# Node history entries
node_history_get_admin:
@@ -2106,6 +2244,14 @@ node_history_get_reader:
assert_list_length:
history: 1
+node_history_get_service:
+ path: '/v1/nodes/{node_ident}/history'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+ assert_list_length:
+ history: 1
+
node_history_get_entry_admin:
path: '/v1/nodes/{node_ident}/history/{history_ident}'
method: get
@@ -2123,3 +2269,47 @@ node_history_get_entry_reader:
method: get
headers: *reader_headers
assert_status: 200
+
+# Node inventory support
+
+node_inventory_get_admin:
+ path: '/v1/nodes/{node_ident}/inventory'
+ method: get
+ headers: *admin_headers
+ assert_status: 200
+
+node_inventory_get_reader:
+ path: '/v1/nodes/{node_ident}/inventory'
+ method: get
+ headers: *reader_headers
+ assert_status: 200
+
+node_history_get_entry_service:
+ path: '/v1/nodes/{node_ident}/history/{history_ident}'
+ method: get
+ headers: *service_headers
+ assert_status: 200
+
+# Shard support
+shard_get_shards:
+ path: '/v1/shards'
+ method: get
+ headers: *reader_headers
+ assert_status: 200
+
+shard_patch_set_node_shard:
+ path: '/v1/nodes/{node_ident}'
+ method: patch
+ headers: *admin_headers
+ body: &replace_shard
+ - op: replace
+ path: /shard
+ value: 'TestShard'
+ assert_status: 503
+
+shard_patch_set_node_shard_disallowed:
+ path: '/v1/nodes/{node_ident}'
+ method: patch
+ headers: *scoped_member_headers
+ body: *replace_shard
+ assert_status: 403
diff --git a/ironic/tests/unit/cmd/test_status.py b/ironic/tests/unit/cmd/test_status.py
index f776e2d51..2d044cc13 100644
--- a/ironic/tests/unit/cmd/test_status.py
+++ b/ironic/tests/unit/cmd/test_status.py
@@ -14,6 +14,7 @@
from unittest import mock
+from oslo_db import sqlalchemy
from oslo_upgradecheck.upgradecheck import Code
from ironic.cmd import dbsync
@@ -38,3 +39,84 @@ class TestUpgradeChecks(db_base.DbTestCase):
check_result = self.cmd._check_obj_versions()
self.assertEqual(Code.FAILURE, check_result.code)
self.assertEqual(msg, check_result.details)
+
+ def test__check_allocations_table_ok(self):
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.SUCCESS,
+ check_result.code)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_latin1(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_res.all.return_value = (
+ '... ENGINE=InnoDB DEFAULT CHARSET=latin1',
+ )
+ mock_engine.url = '..mysql..'
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The Allocations table is is not using UTF8 '
+ 'encoding. This is corrected in later versions '
+ 'of Ironic, where the table character set schema '
+ 'is automatically migrated. Continued use of a '
+ 'non-UTF8 character set may produce unexpected '
+ 'results.')
+ self.assertEqual(expected_msg, check_result.details)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_myiasm(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_engine.url = '..mysql..'
+ mock_res.all.return_value = (
+ '... ENGINE=MyIASM DEFAULT CHARSET=utf8',
+ )
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The engine used by MySQL for the allocations '
+ 'table is not the intended engine for the Ironic '
+ 'database tables to use. This may have been a '
+ 'result of an error with the table creation schema. '
+ 'This may require Database Administrator '
+ 'intervention and downtime to dump, modify the '
+ 'table engine to utilize InnoDB, and reload the '
+ 'allocations table to utilize the InnoDB engine.')
+ self.assertEqual(expected_msg, check_result.details)
+
+ @mock.patch.object(sqlalchemy.enginefacade.reader,
+ 'get_engine', autospec=True)
+ def test__check_allocations_table_myiasm_both(self, mock_reader):
+ mock_engine = mock.Mock()
+ mock_res = mock.Mock()
+ mock_engine.url = '..mysql..'
+ mock_res.all.return_value = (
+ '... ENGINE=MyIASM DEFAULT CHARSET=latin1',
+ )
+ mock_engine.execute.return_value = mock_res
+ mock_reader.return_value = mock_engine
+ check_result = self.cmd._check_allocations_table()
+ self.assertEqual(Code.WARNING,
+ check_result.code)
+ expected_msg = ('The Allocations table is is not using UTF8 '
+ 'encoding. This is corrected in later versions '
+ 'of Ironic, where the table character set schema '
+ 'is automatically migrated. Continued use of a '
+ 'non-UTF8 character set may produce unexpected '
+ 'results. Additionally: '
+ 'The engine used by MySQL for the allocations '
+ 'table is not the intended engine for the Ironic '
+ 'database tables to use. This may have been a '
+ 'result of an error with the table creation schema. '
+ 'This may require Database Administrator '
+ 'intervention and downtime to dump, modify the '
+ 'table engine to utilize InnoDB, and reload the '
+ 'allocations table to utilize the InnoDB engine.')
+ self.assertEqual(expected_msg, check_result.details)
diff --git a/ironic/tests/unit/common/test_glance_service.py b/ironic/tests/unit/common/test_glance_service.py
index 6be0fccd9..f9e713e91 100644
--- a/ironic/tests/unit/common/test_glance_service.py
+++ b/ironic/tests/unit/common/test_glance_service.py
@@ -24,7 +24,6 @@ from glanceclient import exc as glance_exc
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_utils import uuidutils
-import tenacity
import testtools
from ironic.common import context
@@ -147,22 +146,21 @@ class TestGlanceImageService(base.TestCase):
'os_hash_algo': None,
'os_hash_value': None,
}
- if not mock._is_instance_mock(self.service.call):
- mock.patch.object(self.service, 'call', autospec=True).start()
- self.service.call.return_value = image
- image_meta = self.service.show(image_id)
- self.service.call.assert_called_with('get', image_id)
- self.assertEqual(expected, image_meta)
+ with mock.patch.object(self.service, 'call', autospec=True):
+ self.service.call.return_value = image
+ image_meta = self.service.show(image_id)
+ self.service.call.assert_called_with('get', image_id)
+ self.assertEqual(expected, image_meta)
def test_show_makes_datetimes(self):
image_id = uuidutils.generate_uuid()
image = self._make_datetime_fixture()
- with mock.patch.object(self.service, 'call', return_value=image,
- autospec=True):
+ with mock.patch.object(self.service, 'call', autospec=True):
+ self.service.call.return_value = image
image_meta = self.service.show(image_id)
- self.service.call.assert_called_once_with('get', image_id)
- self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
- self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
+ self.service.call.assert_called_with('get', image_id)
+ self.assertEqual(self.NOW_DATETIME, image_meta['created_at'])
+ self.assertEqual(self.NOW_DATETIME, image_meta['updated_at'])
@mock.patch.object(service_utils, 'is_image_active', autospec=True)
def test_show_raises_when_no_authtoken_in_the_context(self,
@@ -176,11 +174,10 @@ class TestGlanceImageService(base.TestCase):
def test_show_raises_when_image_not_active(self):
image_id = uuidutils.generate_uuid()
image = self._make_fixture(name='image1', id=image_id, status="queued")
- if not mock._is_instance_mock(self.service.call):
- mock.patch.object(self.service, 'call', autospec=True).start()
- self.service.call.return_value = image
- self.assertRaises(exception.ImageUnacceptable,
- self.service.show, image_id)
+ with mock.patch.object(self.service, 'call', autospec=True):
+ self.service.call.return_value = image
+ self.assertRaises(exception.ImageUnacceptable,
+ self.service.show, image_id)
def test_download_with_retries(self):
tries = [0]
@@ -204,20 +201,18 @@ class TestGlanceImageService(base.TestCase):
image_id = uuidutils.generate_uuid()
writer = NullWriter()
- with mock.patch.object(tenacity, 'retry', autospec=True) as mock_retry:
- # When retries are disabled, we should get an exception
- self.config(num_retries=0, group='glance')
- self.assertRaises(exception.GlanceConnectionFailed,
- stub_service.download, image_id, writer)
-
- # Now lets enable retries. No exception should happen now.
- self.config(num_retries=1, group='glance')
- importlib.reload(image_service)
- stub_service = image_service.GlanceImageService(stub_client,
- stub_context)
- tries = [0]
- stub_service.download(image_id, writer)
- mock_retry.assert_called_once()
+ # When retries are disabled, we should get an exception
+ self.config(num_retries=0, group='glance')
+ self.assertRaises(exception.GlanceConnectionFailed,
+ stub_service.download, image_id, writer)
+
+ # Now lets enable retries. No exception should happen now.
+ self.config(num_retries=1, group='glance')
+ importlib.reload(image_service)
+ stub_service = image_service.GlanceImageService(stub_client,
+ stub_context)
+ tries = [0]
+ stub_service.download(image_id, writer)
def test_download_no_data(self):
self.client.fake_wrapped = None
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index dd782c687..5530feb38 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -318,7 +318,8 @@ class IronicImagesTestCase(base.TestCase):
autospec=True)
def test_is_source_a_path_content_length(self, validate_mock):
mock_response = mock.Mock()
- mock_response.headers = {'Content-Length': 1}
+ mock_response.headers = {'Content-Length': 1,
+ 'Content-Type': 'text/plain'}
validate_mock.return_value = mock_response
self.assertFalse(images.is_source_a_path('context', 'http://foo/bar/'))
validate_mock.assert_called_once_with(mock.ANY, 'http://foo/bar/')
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index f9d781830..b775c68a1 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -25,6 +25,7 @@ from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
+from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import image_service
from ironic.common import image_service as base_image_service
@@ -45,6 +46,11 @@ DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
+def _reset_dhcp_provider(config, provider_name):
+ config(dhcp_provider=provider_name, group='dhcp')
+ dhcp_factory.DHCPFactory._dhcp_provider = None
+
+
# Prevent /httpboot validation on creating the node
@mock.patch('ironic.drivers.modules.pxe.PXEBoot.__init__', lambda self: None)
class TestPXEUtils(db_base.DbTestCase):
@@ -674,7 +680,7 @@ class TestPXEUtils(db_base.DbTestCase):
# TODO(TheJulia): We should... like... fix the template to
# enable mac address usage.....
grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
- self.config(dhcp_provider='none', group='dhcp')
+ _reset_dhcp_provider(self.config, 'none')
self.config(tftp_root=tempfile.mkdtemp(), group='pxe')
link_ip_configs_mock.side_effect = \
exception.FailedToGetIPAddressOnPort(port_id='blah')
@@ -898,7 +904,7 @@ class TestPXEUtils(db_base.DbTestCase):
{'opt_name': '150',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
- {'opt_name': 'server-ip-address',
+ {'opt_name': '255',
'opt_value': '192.0.2.1',
'ip_version': ip_version}
]
@@ -1357,7 +1363,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
'LiveOS',
'squashfs.img')),
'ks_template':
- (CONF.anaconda.default_ks_template,
+ ('file://' + CONF.anaconda.default_ks_template,
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'ks.cfg.template')),
@@ -1375,63 +1381,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
self.assertEqual(expected_info, image_info)
# In the absense of kickstart template in both instance_info and
# image default kickstart template is used
- self.assertEqual(CONF.anaconda.default_ks_template,
- image_info['ks_template'][0])
- calls = [mock.call(task.node), mock.call(task.node)]
- boot_opt_mock.assert_has_calls(calls)
- # Instance info gets presedence over kickstart template on the
- # image
- properties['properties'] = {'ks_template': 'glance://template_id'}
- task.node.instance_info['ks_template'] = 'https://server/fake.tmpl'
- image_show_mock.return_value = properties
- image_info = pxe_utils.get_instance_image_info(
- task, ipxe_enabled=False)
- self.assertEqual('https://server/fake.tmpl',
- image_info['ks_template'][0])
-
- @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
- return_value='kickstart', autospec=True)
- @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
- def test_get_instance_image_info_with_kickstart_url(
- self, image_show_mock, boot_opt_mock):
- properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
- u'ramdisk_id': u'instance_ramdisk_uuid',
- u'image_source': u'http://path/to/os/'}}
-
- expected_info = {'ramdisk':
- ('instance_ramdisk_uuid',
- os.path.join(CONF.pxe.tftp_root,
- self.node.uuid,
- 'ramdisk')),
- 'kernel':
- ('instance_kernel_uuid',
- os.path.join(CONF.pxe.tftp_root,
- self.node.uuid,
- 'kernel')),
- 'ks_template':
- (CONF.anaconda.default_ks_template,
- os.path.join(CONF.deploy.http_root,
- self.node.uuid,
- 'ks.cfg.template')),
- 'ks_cfg':
- ('',
- os.path.join(CONF.deploy.http_root,
- self.node.uuid,
- 'ks.cfg'))}
- image_show_mock.return_value = properties
- self.context.auth_token = 'fake'
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- dii = task.node.driver_internal_info
- dii['is_source_a_path'] = True
- task.node.driver_internal_info = dii
- task.node.save()
- image_info = pxe_utils.get_instance_image_info(
- task, ipxe_enabled=False)
- self.assertEqual(expected_info, image_info)
- # In the absense of kickstart template in both instance_info and
- # image default kickstart template is used
- self.assertEqual(CONF.anaconda.default_ks_template,
+ self.assertEqual('file://' + CONF.anaconda.default_ks_template,
image_info['ks_template'][0])
calls = [mock.call(task.node), mock.call(task.node)]
boot_opt_mock.assert_has_calls(calls)
@@ -1463,7 +1413,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
self.node.uuid,
'kernel')),
'ks_template':
- (CONF.anaconda.default_ks_template,
+ ('file://' + CONF.anaconda.default_ks_template,
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'ks.cfg.template')),
@@ -1490,7 +1440,7 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
self.assertEqual(expected_info, image_info)
# In the absense of kickstart template in both instance_info and
# image default kickstart template is used
- self.assertEqual(CONF.anaconda.default_ks_template,
+ self.assertEqual('file://' + CONF.anaconda.default_ks_template,
image_info['ks_template'][0])
calls = [mock.call(task.node), mock.call(task.node)]
boot_opt_mock.assert_has_calls(calls)
@@ -1577,6 +1527,46 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
list(fake_pxe_info.values()),
True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
+ @mock.patch.object(pxe_utils, 'ensure_tree', autospec=True)
+ @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
+ def test_cache_ramdisk_kernel_ipxe_anaconda(self, mock_fetch_image,
+ mock_ensure_tree, mock_chmod):
+ expected_path = os.path.join(CONF.deploy.http_root,
+ self.node.uuid)
+ fake_pxe_info = {'ramdisk':
+ ('instance_ramdisk_uuid',
+ os.path.join(CONF.pxe.tftp_root,
+ self.node.uuid,
+ 'ramdisk')),
+ 'kernel':
+ ('instance_kernel_uuid',
+ os.path.join(CONF.pxe.tftp_root,
+ self.node.uuid,
+ 'kernel')),
+ 'ks_template':
+ ('file://' + CONF.anaconda.default_ks_template,
+ os.path.join(CONF.deploy.http_root,
+ self.node.uuid,
+ 'ks.cfg.template')),
+ 'ks_cfg':
+ ('',
+ os.path.join(CONF.deploy.http_root,
+ self.node.uuid,
+ 'ks.cfg'))}
+ expected = fake_pxe_info.copy()
+ expected.pop('ks_cfg')
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info,
+ ipxe_enabled=True)
+ mock_ensure_tree.assert_called_with(expected_path)
+ mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
+ list(expected.values()),
+ True)
+
@mock.patch.object(pxe.PXEBoot, '__init__', lambda self: None)
class PXEBuildKickstartConfigOptionsTestCase(db_base.DbTestCase):
@@ -1628,6 +1618,26 @@ class PXEBuildKickstartConfigOptionsTestCase(db_base.DbTestCase):
params = pxe_utils.build_kickstart_config_options(task)
self.assertTrue(params['ks_options'].pop('agent_token'))
self.assertEqual(expected, params['ks_options'])
+ self.assertNotIn('insecure_heartbeat', params)
+
+ @mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
+ def test_build_kickstart_config_options_pxe_insecure_heartbeat(
+ self, api_url_mock):
+ api_url_mock.return_value = 'http://ironic-api'
+ self.assertFalse(CONF.anaconda.insecure_heartbeat)
+ CONF.set_override('insecure_heartbeat', True, 'anaconda')
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ expected = {}
+ expected['liveimg_url'] = task.node.instance_info['image_url']
+ expected['config_drive'] = ''
+ expected['heartbeat_url'] = (
+ 'http://ironic-api/v1/heartbeat/%s' % task.node.uuid
+ )
+ expected['insecure_heartbeat'] = 'true'
+ params = pxe_utils.build_kickstart_config_options(task)
+ self.assertTrue(params['ks_options'].pop('agent_token'))
+ self.assertEqual(expected, params['ks_options'])
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_prepare_instance_kickstart_config_not_anaconda_boot(self,
@@ -1900,7 +1910,8 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
self.config(tftp_server='ff80::1', group='pxe')
self.config(http_url='http://[ff80::1]:1234', group='deploy')
- self.config(dhcp_provider='isc', group='dhcp')
+ _reset_dhcp_provider(self.config, 'none')
+
if ip_version == 6:
# NOTE(TheJulia): DHCPv6 RFCs seem to indicate that the prior
# options are not imported, although they may be supported
@@ -1928,7 +1939,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
{'opt_name': '67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
- {'opt_name': 'server-ip-address',
+ {'opt_name': '255',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
@@ -1936,7 +1947,8 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
- self.config(dhcp_provider='neutron', group='dhcp')
+ _reset_dhcp_provider(self.config, 'neutron')
+
if ip_version == 6:
# Boot URL variable set from prior test of isc parameters.
expected_info = [{'opt_name': 'tag:!ipxe6,59',
@@ -1959,7 +1971,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
{'opt_name': 'tag:ipxe,67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
- {'opt_name': 'server-ip-address',
+ {'opt_name': '255',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
diff --git a/ironic/tests/unit/common/test_release_mappings.py b/ironic/tests/unit/common/test_release_mappings.py
index da1eeedd2..dad536257 100644
--- a/ironic/tests/unit/common/test_release_mappings.py
+++ b/ironic/tests/unit/common/test_release_mappings.py
@@ -44,7 +44,7 @@ NUMERIC_RELEASES = sorted(
map(versionutils.convert_version_to_tuple,
set(release_mappings.RELEASE_MAPPING)
# Update the exceptions whenever needed
- - {'master', 'yoga', 'xena'}),
+ - {'master', 'zed', 'yoga'}),
reverse=True)
@@ -91,13 +91,17 @@ class ReleaseMappingsTestCase(base.TestCase):
def test_contains_all_db_objects(self):
self.assertIn('master', release_mappings.RELEASE_MAPPING)
- model_names = set((s.__name__ for s in models.Base.__subclasses__()))
+ use_models = models.Base.__subclasses__()
+ use_models.append(models.Node)
+ model_names = set((s.__name__ for s in use_models))
# NOTE(xek): As a rule, all models which can be changed between
# releases or are sent through RPC should have their counterpart
# versioned objects. Do not add an exception for such objects,
# initialize them with the version 1.0 instead.
+ # NodeBase is also excluded as it is covered by Node.
exceptions = set(['NodeTag', 'ConductorHardwareInterfaces',
- 'NodeTrait', 'DeployTemplateStep'])
+ 'NodeTrait', 'DeployTemplateStep',
+ 'NodeBase'])
model_names -= exceptions
# NodeTrait maps to two objects
model_names |= set(['Trait', 'TraitList'])
diff --git a/ironic/tests/unit/common/test_rpc_service.py b/ironic/tests/unit/common/test_rpc_service.py
index 8483bfb22..09446ecf8 100644
--- a/ironic/tests/unit/common/test_rpc_service.py
+++ b/ironic/tests/unit/common/test_rpc_service.py
@@ -10,24 +10,28 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+import time
from unittest import mock
from oslo_config import cfg
import oslo_messaging
from oslo_service import service as base_service
+from oslo_utils import timeutils
from ironic.common import context
from ironic.common import rpc
from ironic.common import rpc_service
from ironic.conductor import manager
from ironic.objects import base as objects_base
-from ironic.tests import base
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.db import utils as db_utils
CONF = cfg.CONF
@mock.patch.object(base_service.Service, '__init__', lambda *_, **__: None)
-class TestRPCService(base.TestCase):
+class TestRPCService(db_base.DbTestCase):
def setUp(self):
super(TestRPCService, self).setUp()
@@ -35,6 +39,7 @@ class TestRPCService(base.TestCase):
mgr_module = "ironic.conductor.manager"
mgr_class = "ConductorManager"
self.rpc_svc = rpc_service.RPCService(host, mgr_module, mgr_class)
+ self.rpc_svc.manager.dbapi = self.dbapi
@mock.patch.object(manager.ConductorManager, 'prepare_host', autospec=True)
@mock.patch.object(oslo_messaging, 'Target', autospec=True)
@@ -108,3 +113,75 @@ class TestRPCService(base.TestCase):
self.assertFalse(self.rpc_svc._started)
self.assertIn("boom", self.rpc_svc._failure)
self.assertRaises(SystemExit, self.rpc_svc.wait_for_start)
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_stop_instant(self, mock_sleep, mock_utcnow):
+ # del_host returns instantly
+ mock_utcnow.return_value = datetime.datetime(2023, 2, 2, 21, 10, 0)
+ conductor1 = db_utils.get_test_conductor(hostname='fake_host')
+ with mock.patch.object(self.dbapi, 'get_online_conductors',
+ autospec=True) as mock_cond_list:
+ mock_cond_list.return_value = [conductor1]
+ self.rpc_svc.stop()
+
+ # single conductor so exit immediately without waiting
+ mock_sleep.assert_not_called()
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_stop_after_full_reset_interval(self, mock_sleep, mock_utcnow):
+ # del_host returns instantly
+ mock_utcnow.return_value = datetime.datetime(2023, 2, 2, 21, 10, 0)
+ conductor1 = db_utils.get_test_conductor(hostname='fake_host')
+ conductor2 = db_utils.get_test_conductor(hostname='other_fake_host')
+ with mock.patch.object(self.dbapi, 'get_online_conductors',
+ autospec=True) as mock_cond_list:
+ # multiple conductors, so wait for hash_ring_reset_interval
+ mock_cond_list.return_value = [conductor1, conductor2]
+ self.rpc_svc.stop()
+
+ # wait the total CONF.hash_ring_reset_interval 15 seconds
+ mock_sleep.assert_has_calls([mock.call(15)])
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_stop_after_remaining_interval(self, mock_sleep, mock_utcnow):
+ mock_utcnow.return_value = datetime.datetime(2023, 2, 2, 21, 10, 0)
+ conductor1 = db_utils.get_test_conductor(hostname='fake_host')
+ conductor2 = db_utils.get_test_conductor(hostname='other_fake_host')
+
+ # del_host returns after 5 seconds
+ mock_utcnow.side_effect = [
+ datetime.datetime(2023, 2, 2, 21, 10, 0),
+ datetime.datetime(2023, 2, 2, 21, 10, 5),
+ ]
+ with mock.patch.object(self.dbapi, 'get_online_conductors',
+ autospec=True) as mock_cond_list:
+ # multiple conductors, so wait for hash_ring_reset_interval
+ mock_cond_list.return_value = [conductor1, conductor2]
+ self.rpc_svc.stop()
+
+ # wait the remaining 10 seconds
+ mock_sleep.assert_has_calls([mock.call(10)])
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_stop_slow(self, mock_sleep, mock_utcnow):
+ mock_utcnow.return_value = datetime.datetime(2023, 2, 2, 21, 10, 0)
+ conductor1 = db_utils.get_test_conductor(hostname='fake_host')
+ conductor2 = db_utils.get_test_conductor(hostname='other_fake_host')
+
+ # del_host returns after 16 seconds
+ mock_utcnow.side_effect = [
+ datetime.datetime(2023, 2, 2, 21, 10, 0),
+ datetime.datetime(2023, 2, 2, 21, 10, 16),
+ ]
+ with mock.patch.object(self.dbapi, 'get_online_conductors',
+ autospec=True) as mock_cond_list:
+ # multiple conductors, so wait for hash_ring_reset_interval
+ mock_cond_list.return_value = [conductor1, conductor2]
+ self.rpc_svc.stop()
+
+ # no wait required, CONF.hash_ring_reset_interval already exceeded
+ mock_sleep.assert_not_called()
diff --git a/ironic/tests/unit/conductor/mgr_utils.py b/ironic/tests/unit/conductor/mgr_utils.py
index 4451d7a15..8ee1fd1f9 100644
--- a/ironic/tests/unit/conductor/mgr_utils.py
+++ b/ironic/tests/unit/conductor/mgr_utils.py
@@ -127,7 +127,12 @@ class ServiceSetUpMixin(object):
def setUp(self):
super(ServiceSetUpMixin, self).setUp()
self.hostname = 'test-host'
- self.config(node_locked_retry_attempts=1, group='conductor')
+ # Relies upon the default number of "NodeLocked" retries as
+ # in unit testing, SQLite is not operated in a transactional
+ # way and utilizes asynchronous IO. Locking, in particular, can
+ # detect this, and it can cause some false or delayed impressions
+ # of lock status, causing lock failures.
+ self.config(node_locked_retry_attempts=3, group='conductor')
self.config(node_locked_retry_interval=0, group='conductor')
self.service = manager.ConductorManager(self.hostname, 'test-topic')
@@ -139,15 +144,18 @@ class ServiceSetUpMixin(object):
return
self.service.del_host()
- def _start_service(self, start_periodic_tasks=False):
+ def _start_service(self, start_periodic_tasks=False, start_consoles=True,
+ start_allocations=True):
if start_periodic_tasks:
- self.service.init_host()
+ self.service.init_host(start_consoles=start_consoles,
+ start_allocations=start_allocations)
else:
with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
with mock.patch.object(pxe_utils, 'place_common_config',
autospec=True):
self.service.prepare_host()
- self.service.init_host()
+ self.service.init_host(start_consoles=start_consoles,
+ start_allocations=start_allocations)
self.addCleanup(self._stop_service)
diff --git a/ironic/tests/unit/conductor/test_allocations.py b/ironic/tests/unit/conductor/test_allocations.py
index d063cd13a..6d77bd65b 100644
--- a/ironic/tests/unit/conductor/test_allocations.py
+++ b/ironic/tests/unit/conductor/test_allocations.py
@@ -209,7 +209,7 @@ class AllocationTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
state='allocating',
conductor_affinity=dead_conductor.id)
- self._start_service()
+ self._start_service(start_allocations=False)
with mock.patch.object(self.dbapi, 'get_offline_conductors',
autospec=True) as mock_conds:
mock_conds.return_value = [dead_conductor.id]
diff --git a/ironic/tests/unit/conductor/test_base_manager.py b/ironic/tests/unit/conductor/test_base_manager.py
index f92c6e58c..e69003123 100644
--- a/ironic/tests/unit/conductor/test_base_manager.py
+++ b/ironic/tests/unit/conductor/test_base_manager.py
@@ -494,9 +494,11 @@ class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
- driver='fake-hardware'
+ driver='fake-hardware',
)
- self._start_service()
+ # Enable consoles *after* service has started, otherwise it races
+ # as the service startup also launches consoles.
+ self._start_service(start_consoles=False)
self.service._start_consoles(self.context)
self.assertEqual(2, mock_start_console.call_count)
mock_notify.assert_has_calls(
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
index 65261450a..34e805deb 100644
--- a/ironic/tests/unit/conductor/test_cleaning.py
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -51,8 +51,6 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
'step': 'build_raid', 'priority': 0, 'interface': 'deploy'}
def __do_node_clean_validate_fail(self, mock_validate, clean_steps=None):
- # InvalidParameterValue should cause node to go to CLEANFAIL
- mock_validate.side_effect = exception.InvalidParameterValue('error')
tgt_prov_state = states.MANAGEABLE if clean_steps else states.AVAILABLE
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
@@ -68,26 +66,42 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.assertIsNone(node.fault)
mock_validate.assert_called_once_with(mock.ANY, mock.ANY)
+ def __do_node_clean_validate_fail_invalid(self, mock_validate,
+ clean_steps=None):
+ # InvalidParameterValue should cause node to go to CLEANFAIL
+ mock_validate.side_effect = exception.InvalidParameterValue('error')
+ self.__do_node_clean_validate_fail(mock_validate,
+ clean_steps=clean_steps)
+
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_clean_automated_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
+ self.__do_node_clean_validate_fail_invalid(mock_validate)
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test__do_node_clean_manual_power_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+ self.__do_node_clean_validate_fail_invalid(mock_validate,
+ clean_steps=[])
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test__do_node_clean_automated_network_validate_fail(self,
mock_validate):
- self.__do_node_clean_validate_fail(mock_validate)
+ self.__do_node_clean_validate_fail_invalid(mock_validate)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
autospec=True)
def test__do_node_clean_manual_network_validate_fail(self, mock_validate):
- self.__do_node_clean_validate_fail(mock_validate, clean_steps=[])
+ self.__do_node_clean_validate_fail_invalid(mock_validate,
+ clean_steps=[])
+
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
+ def test__do_node_clean_network_error_fail(self, mock_validate):
+ # NetworkError should cause node to go to CLEANFAIL
+ mock_validate.side_effect = exception.NetworkError()
+ self.__do_node_clean_validate_fail(mock_validate)
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
@mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
@@ -1124,12 +1138,12 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
class DoNodeCleanAbortTestCase(db_base.DbTestCase):
@mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
- def _test__do_node_clean_abort(self, step_name, tear_mock):
+ def _test_do_node_clean_abort(self, clean_step, tear_mock):
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
- provision_state=states.CLEANFAIL,
+ provision_state=states.CLEANWAIT,
target_provision_state=states.AVAILABLE,
- clean_step={'step': 'foo', 'abortable': True},
+ clean_step=clean_step,
driver_internal_info={
'agent_url': 'some url',
'agent_secret_token': 'token',
@@ -1139,11 +1153,11 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
'skip_current_clean_step': True})
with task_manager.acquire(self.context, node.uuid) as task:
- cleaning.do_node_clean_abort(task, step_name=step_name)
+ cleaning.do_node_clean_abort(task)
self.assertIsNotNone(task.node.last_error)
tear_mock.assert_called_once_with(task.driver.deploy, task)
- if step_name:
- self.assertIn(step_name, task.node.last_error)
+ if clean_step:
+ self.assertIn(clean_step['step'], task.node.last_error)
# assert node's clean_step and metadata was cleaned up
self.assertEqual({}, task.node.clean_step)
self.assertNotIn('clean_step_index',
@@ -1159,11 +1173,12 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
self.assertNotIn('agent_secret_token',
task.node.driver_internal_info)
- def test__do_node_clean_abort(self):
- self._test__do_node_clean_abort(None)
+ def test_do_node_clean_abort_early(self):
+ self._test_do_node_clean_abort(None)
- def test__do_node_clean_abort_with_step_name(self):
- self._test__do_node_clean_abort('foo')
+ def test_do_node_clean_abort_with_step(self):
+ self._test_do_node_clean_abort({'step': 'foo', 'interface': 'deploy',
+ 'abortable': True})
@mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
def test__do_node_clean_abort_tear_down_fail(self, tear_mock):
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 378d65f15..6a6f7e08f 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -26,6 +26,7 @@ from unittest import mock
import eventlet
from futurist import waiters
+from ironic_lib import metrics as ironic_metrics
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import uuidutils
@@ -54,6 +55,7 @@ from ironic.conductor import verify
from ironic.db import api as dbapi
from ironic.drivers import base as drivers_base
from ironic.drivers.modules import fake
+from ironic.drivers.modules import inspect_utils
from ironic.drivers.modules.network import flat as n_flat
from ironic import objects
from ironic.objects import base as obj_base
@@ -1829,6 +1831,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
def test_do_node_deploy_maintenance(self, mock_iwdi):
mock_iwdi.return_value = False
+ self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
maintenance=True)
exc = self.assertRaises(messaging.rpc.ExpectedException,
@@ -1843,6 +1846,7 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
self.assertFalse(mock_iwdi.called)
def _test_do_node_deploy_validate_fail(self, mock_validate, mock_iwdi):
+ self._start_service()
mock_iwdi.return_value = False
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
@@ -2389,6 +2393,7 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakePower.validate',
autospec=True)
def test_do_node_tear_down_validate_fail(self, mock_validate):
+ self._start_service()
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
node = obj_utils.create_test_node(
@@ -2730,7 +2735,8 @@ class DoProvisioningActionTestCase(mgr_utils.ServiceSetUpMixin,
# Node will be moved to tgt_prov_state after cleaning, not tested here
self.assertEqual(states.CLEANFAIL, node.provision_state)
self.assertEqual(tgt_prov_state, node.target_provision_state)
- self.assertIsNone(node.last_error)
+ self.assertEqual('By request, the clean operation was aborted',
+ node.last_error)
mock_spawn.assert_called_with(
self.service, cleaning.do_node_clean_abort, mock.ANY)
@@ -3898,6 +3904,39 @@ class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.dbapi.get_node_by_uuid,
node.uuid)
+ @mock.patch.object(inspect_utils, 'clean_up_swift_entries', autospec=True)
+ def test_inventory_in_swift_get_destroyed_after_destroying_a_node_by_uuid(
+ self, mock_clean_up):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware')
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ self._start_service()
+ self.service.destroy_node(self.context, node.uuid)
+ mock_clean_up.assert_called_once_with(mock.ANY)
+
+ @mock.patch.object(inspect_utils, 'clean_up_swift_entries', autospec=True)
+ def test_inventory_in_swift_not_destroyed_SwiftOSE_maintenance(
+ self, mock_clean_up):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ maintenance=True)
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ mock_clean_up.side_effect = exception.SwiftObjectStillExists(
+ obj="inventory-123", node=node.uuid)
+ self._start_service()
+ self.service.destroy_node(self.context, node.uuid)
+
+ @mock.patch.object(inspect_utils, 'clean_up_swift_entries', autospec=True)
+ def test_inventory_in_swift_not_destroyed_SwiftOSE_not_maintenance(
+ self, mock_clean_up):
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ maintenance=False)
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ mock_clean_up.side_effect = exception.SwiftObjectStillExists(
+ obj="inventory-123", node=node.uuid)
+ self._start_service()
+ self.assertRaises(exception.SwiftObjectStillExists,
+ self.service.destroy_node, self.context,
+ node.uuid)
+
@mgr_utils.mock_record_keepalive
class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -4236,7 +4275,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_all(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['All'], group='conductor')
+ CONF.set_override('data_types', ['All'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4245,7 +4285,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_part(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['t1'], group='conductor')
+ CONF.set_override('data_types', ['t1'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4254,7 +4295,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__filter_out_unsupported_types_non(self):
self._start_service()
- CONF.set_override('send_sensor_data_types', ['t3'], group='conductor')
+ CONF.set_override('data_types', ['t3'],
+ group='sensor_data')
fake_sensors_data = {"t1": {'f1': 'v1'}, "t2": {'f1': 'v1'}}
actual_result = (
self.service._filter_out_unsupported_types(fake_sensors_data))
@@ -4268,7 +4310,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
for i in range(5):
nodes.put_nowait(('fake_uuid-%d' % i, 'fake-hardware', '', None))
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = False
@@ -4297,7 +4340,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
self.service._shutdown = True
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
self.service._sensors_nodes_task(self.context, nodes)
acquire_mock.return_value.__enter__.assert_not_called()
@@ -4306,7 +4350,8 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True,
+ group='sensor_data')
self._start_service()
@@ -4324,7 +4369,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
task = acquire_mock.return_value.__enter__.return_value
task.node.maintenance = True
@@ -4347,10 +4392,10 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn):
self._start_service()
- CONF.set_override('send_sensor_data', True, group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
# NOTE(galyna): do not wait for threads to be finished in unittests
- CONF.set_override('send_sensor_data_wait_timeout', 0,
- group='conductor')
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake', None)]
self.service._send_sensor_data(self.context)
@@ -4358,6 +4403,37 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service._sensors_nodes_task,
self.context, mock.ANY)
+ @mock.patch.object(queue, 'Queue', autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_sensors_conductor',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_spawn_worker',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
+ def test___send_sensor_data_disabled(
+ self, get_nodeinfo_list_mock,
+ _mapped_to_this_conductor_mock,
+ mock_spawn, mock_sensors_conductor,
+ mock_queue):
+ self._start_service()
+
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('enable_for_nodes', False,
+ group='sensor_data')
+ CONF.set_override('enable_for_conductor', False,
+ group='sensor_data')
+ # NOTE(galyna): do not wait for threads to be finished in unittests
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
+ _mapped_to_this_conductor_mock.return_value = True
+ get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake', None)]
+ self.service._send_sensor_data(self.context)
+ mock_sensors_conductor.assert_not_called()
+ # NOTE(TheJulia): Can't use the spawn worker since it records other,
+ # unrelated calls. So, queue works well here.
+ mock_queue.assert_not_called()
+
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
@@ -4370,24 +4446,66 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.reset_mock()
number_of_workers = 8
- CONF.set_override('send_sensor_data', True, group='conductor')
- CONF.set_override('send_sensor_data_workers', number_of_workers,
- group='conductor')
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('workers', number_of_workers,
+ group='sensor_data')
# NOTE(galyna): do not wait for threads to be finished in unittests
- CONF.set_override('send_sensor_data_wait_timeout', 0,
- group='conductor')
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
_mapped_to_this_conductor_mock.return_value = True
get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake',
None)] * 20
self.service._send_sensor_data(self.context)
- self.assertEqual(number_of_workers,
+ self.assertEqual(number_of_workers + 1,
mock_spawn.call_count)
# TODO(TheJulia): At some point, we should add a test to validate that
# a modified filter to return all nodes actually works, although
# the way the sensor tests are written, the list is all mocked.
+ @mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
+ autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
+ def test___send_sensor_data_one_worker(
+ self, get_nodeinfo_list_mock, _mapped_to_this_conductor_mock,
+ mock_spawn):
+ self._start_service()
+ mock_spawn.reset_mock()
+
+ number_of_workers = 1
+ CONF.set_override('send_sensor_data', True, group='sensor_data')
+ CONF.set_override('workers', number_of_workers,
+ group='sensor_data')
+ # NOTE(galyna): do not wait for threads to be finished in unittests
+ CONF.set_override('wait_timeout', 0,
+ group='sensor_data')
+
+ _mapped_to_this_conductor_mock.return_value = True
+ get_nodeinfo_list_mock.return_value = [('fake_uuid', 'fake',
+ None)] * 20
+ self.service._send_sensor_data(self.context)
+ self.assertEqual(number_of_workers,
+ mock_spawn.call_count)
+
+ @mock.patch.object(messaging.Notifier, 'info', autospec=True)
+ @mock.patch.object(ironic_metrics.MetricLogger,
+ 'get_metrics_data', autospec=True)
+ def test__sensors_conductor(self, mock_get_metrics, mock_notifier):
+ metric = {'metric': 'data'}
+ mock_get_metrics.return_value = metric
+ self._start_service()
+ self.service._sensors_conductor(self.context)
+ self.assertEqual(mock_notifier.call_count, 1)
+ self.assertEqual('ironic.metrics', mock_notifier.call_args.args[2])
+ metrics_dict = mock_notifier.call_args.args[3]
+ self.assertEqual(metrics_dict.get('event_type'),
+ 'ironic.metrics.update')
+ self.assertDictEqual(metrics_dict.get('payload'),
+ metric)
+
@mgr_utils.mock_record_keepalive
class BootDeviceTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -7241,44 +7359,6 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.ERROR)])
- @mock.patch.object(notification_utils, 'emit_console_notification',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
- autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
- autospec=True)
- def test__do_takeover_with_console_port_cleaned(self, mock_prepare,
- mock_take_over,
- mock_start_console,
- mock_notify):
- self._start_service()
- node = obj_utils.create_test_node(self.context, driver='fake-hardware',
- console_enabled=True)
- di_info = node.driver_internal_info
- di_info['allocated_ipmi_terminal_port'] = 12345
- node.driver_internal_info = di_info
- node.save()
-
- task = task_manager.TaskManager(self.context, node.uuid)
-
- self.service._do_takeover(task)
- node.refresh()
- self.assertIsNone(node.last_error)
- self.assertTrue(node.console_enabled)
- self.assertIsNone(
- node.driver_internal_info.get('allocated_ipmi_terminal_port',
- None))
- mock_prepare.assert_called_once_with(task.driver.deploy, task)
- mock_take_over.assert_called_once_with(task.driver.deploy, task)
- mock_start_console.assert_called_once_with(task.driver.console, task)
- mock_notify.assert_has_calls(
- [mock.call(task, 'console_restore',
- obj_fields.NotificationStatus.START),
- mock.call(task, 'console_restore',
- obj_fields.NotificationStatus.END)])
-
@mgr_utils.mock_record_keepalive
class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -8374,7 +8454,6 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
# 9 retained due to days, 3 to config
self.service._manage_node_history(self.context)
events = objects.NodeHistory.list(self.context)
- print(events)
self.assertEqual(12, len(events))
events = objects.NodeHistory.list_by_node_id(self.context, 10)
self.assertEqual(4, len(events))
@@ -8394,3 +8473,73 @@ class NodeHistoryRecordCleanupTestCase(mgr_utils.ServiceSetUpMixin,
self.assertEqual('one', events[1].event)
self.assertEqual('two', events[2].event)
self.assertEqual('three', events[3].event)
+
+
+class ConcurrentActionLimitTestCase(mgr_utils.ServiceSetUpMixin,
+ db_base.DbTestCase):
+
+ def setUp(self):
+ super(ConcurrentActionLimitTestCase, self).setUp()
+ self._start_service()
+ self.node1 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=110,
+ uuid=uuidutils.generate_uuid())
+ self.node2 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=111,
+ uuid=uuidutils.generate_uuid())
+ self.node3 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=112,
+ uuid=uuidutils.generate_uuid())
+ self.node4 = obj_utils.get_test_node(
+ self.context,
+ driver='fake-hardware',
+ id=113,
+ uuid=uuidutils.generate_uuid())
+ # Create the nodes, as the tasks need to operate across tables.
+ self.node1.create()
+ self.node2.create()
+ self.node3.create()
+ self.node4.create()
+
+ def test_concurrent_action_limit_deploy(self):
+ self.node1.provision_state = states.DEPLOYING
+ self.node2.provision_state = states.DEPLOYWAIT
+ self.node1.save()
+ self.node2.save()
+ CONF.set_override('max_concurrent_deploy', 2, group='conductor')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'provisioning')
+ self.service._concurrent_action_limit('unprovisioning')
+ self.service._concurrent_action_limit('cleaning')
+ CONF.set_override('max_concurrent_deploy', 3, group='conductor')
+ self.service._concurrent_action_limit('provisioning')
+
+ def test_concurrent_action_limit_cleaning(self):
+ self.node1.provision_state = states.DELETING
+ self.node2.provision_state = states.CLEANING
+ self.node3.provision_state = states.CLEANWAIT
+ self.node1.save()
+ self.node2.save()
+ self.node3.save()
+
+ CONF.set_override('max_concurrent_clean', 3, group='conductor')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'cleaning')
+ self.assertRaises(
+ exception.ConcurrentActionLimit,
+ self.service._concurrent_action_limit,
+ 'unprovisioning')
+ self.service._concurrent_action_limit('provisioning')
+ CONF.set_override('max_concurrent_clean', 4, group='conductor')
+ self.service._concurrent_action_limit('cleaning')
+ self.service._concurrent_action_limit('unprovisioning')
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index a424e5132..a29da21a7 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -196,7 +196,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
- power_state=states.POWER_OFF)
+ power_state=states.POWER_OFF,
+ last_error='failed before')
task = task_manager.TaskManager(self.context, node.uuid)
get_power_mock.return_value = states.POWER_OFF
@@ -209,6 +210,27 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
+ @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
+ def test_node_power_action_keep_last_error(self, get_power_mock):
+ """Test node_power_action to keep last_error for failed states."""
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ power_state=states.POWER_OFF,
+ provision_state=states.CLEANFAIL,
+ last_error='failed before')
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ get_power_mock.return_value = states.POWER_OFF
+
+ conductor_utils.node_power_action(task, states.POWER_ON)
+
+ node.refresh()
+ get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(states.POWER_ON, node['power_state'])
+ self.assertIsNone(node['target_power_state'])
+ self.assertEqual('failed before', node['last_error'])
+
@mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
index d2af35ceb..4abd1cbc4 100644
--- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
@@ -36,6 +36,7 @@ For postgres on Ubuntu this can be done with the following commands:
import collections
import contextlib
+import json
from unittest import mock
from alembic import script
@@ -114,6 +115,7 @@ class WalkVersionsMixin(object):
check = getattr(self, "_check_%s" % version, None)
if check:
check(engine, data)
+
except Exception:
LOG.error("Failed to migrate to version %(version)s on engine "
"%(engine)s",
@@ -248,17 +250,24 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_conductor = conductors.insert().values(data_conductor)
connection.execute(insert_conductor)
- conductor_stmt = conductors.select(
- conductors.c.hostname == data_conductor['hostname'])
+ conductor_stmt = sqlalchemy.select(
+ models.Conductor.id
+ ).where(
+ models.Conductor.hostname == 'test_host'
+ )
conductor = connection.execute(conductor_stmt).first()
-
data_node = {'uuid': uuidutils.generate_uuid(),
- 'conductor_affinity': conductor['id']}
+ 'conductor_affinity': conductor.id}
insert_node = nodes.insert().values(data_node)
+
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data_node['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.conductor_affinity
+ ).where(
+ models.Node.uuid == data_node['uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(conductor['id'], node['conductor_affinity'])
+ self.assertEqual(conductor.id, node.conductor_affinity)
def _check_242cc6a923b3(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -285,21 +294,26 @@ class MigrationCheckersMixin(object):
return data
def _check_5674c57409b9(self, engine, data):
- nodes = db_utils.get_table(engine, 'nodes')
- result = engine.execute(nodes.select())
-
- def _get_state(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return row['provision_state']
-
- for row in result:
- old = _get_state(row['uuid'])
- new = row['provision_state']
- if old is None:
- self.assertEqual('available', new)
- else:
- self.assertEqual(old, new)
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.provision_state
+ )
+ )
+
+ def _get_state(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return row['provision_state']
+
+ for row in result:
+ old = _get_state(row.uuid)
+ new = row['provision_state']
+ if old is None:
+ self.assertEqual('available', new)
+ else:
+ self.assertEqual(old, new)
def _check_bb59b63f55a(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -360,9 +374,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
+ node_stmt = sqlalchemy.select(
+ models.Node.name
+ ).where(
+ models.Node.uuid == uuid
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(bigstring, node['name'])
+ self.assertEqual(bigstring, node.name)
def _check_516faf1bb9b1(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -372,9 +390,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
+ node_stmt = sqlalchemy.select(
+ models.Node.driver
+ ).where(
+ models.Node.uuid == uuid
+ )
node = connection.execute(node_stmt).first()
- self.assertEqual(bigstring, node['driver'])
+ self.assertEqual(bigstring, node.driver)
def _check_48d6c242bb9b(self, engine, data):
node_tags = db_utils.get_table(engine, 'node_tags')
@@ -390,9 +412,13 @@ class MigrationCheckersMixin(object):
data = {'node_id': '123', 'tag': 'tag1'}
insert_node_tag = node_tags.insert().values(data)
connection.execute(insert_node_tag)
- tag_stmt = node_tags.select(node_tags.c.node_id == '123')
+ tag_stmt = sqlalchemy.select(
+ models.NodeTag.tag
+ ).where(
+ models.NodeTag.node_id == '123'
+ )
tag = connection.execute(tag_stmt).first()
- self.assertEqual('tag1', tag['tag'])
+ self.assertEqual('tag1', tag.tag)
def _check_5ea1b0d310e(self, engine, data):
portgroup = db_utils.get_table(engine, 'portgroups')
@@ -441,17 +467,22 @@ class MigrationCheckersMixin(object):
return data
def _check_f6fdb920c182(self, engine, data):
- ports = db_utils.get_table(engine, 'ports')
- result = engine.execute(ports.select())
- def _was_inserted(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return True
+ with engine.begin() as connection:
+ port_stmt = sqlalchemy.select(
+ models.Port.uuid,
+ models.Port.pxe_enabled
+ )
+ result = connection.execute(port_stmt)
- for row in result:
- if _was_inserted(row['uuid']):
- self.assertTrue(row['pxe_enabled'])
+ def _was_inserted(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return True
+
+ for row in result:
+ if _was_inserted(row['uuid']):
+ self.assertTrue(row['pxe_enabled'])
def _check_e294876e8028(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -494,18 +525,21 @@ class MigrationCheckersMixin(object):
return data
def _check_c14cef6dfedf(self, engine, data):
- nodes = db_utils.get_table(engine, 'nodes')
- result = engine.execute(nodes.select())
counts = collections.defaultdict(int)
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.network_interface))
- def _was_inserted(uuid):
- for row in data:
- if row['uuid'] == uuid:
- return True
+ def _was_inserted(uuid):
+ for row in data:
+ if row['uuid'] == uuid:
+ return True
- for row in result:
- if _was_inserted(row['uuid']):
- counts[row['network_interface']] += 1
+ for row in result:
+ if _was_inserted(row['uuid']):
+ counts[row['network_interface']] += 1
# using default config values, we should have 2 flat and one neutron
self.assertEqual(2, counts['flat'])
@@ -602,8 +636,10 @@ class MigrationCheckersMixin(object):
self.assertIn('mode', col_names)
self.assertIsInstance(portgroups.c.mode.type,
sqlalchemy.types.String)
-
- result = engine.execute(portgroups.select())
+ with engine.begin() as connection:
+ result = connection.execute(
+ sqlalchemy.select(models.Portgroup.mode)
+ )
for row in result:
self.assertEqual(CONF.default_portgroup_mode, row['mode'])
@@ -675,9 +711,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.id
+ ).where(
+ models.Node.uuid == data['uuid']
+ )
node = connection.execute(node_stmt).first()
- data['id'] = node['id']
+ data['id'] = node.id
return data
def _check_b4130a7fc904(self, engine, data):
@@ -694,10 +734,13 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_trait = node_traits.insert().values(trait)
connection.execute(insert_trait)
- trait_stmt = node_traits.select(
- node_traits.c.node_id == data['id'])
+ trait_stmt = sqlalchemy.select(
+ models.NodeTrait.trait
+ ).where(
+ models.NodeTrait.node_id == data['id']
+ )
trait = connection.execute(trait_stmt).first()
- self.assertEqual('trait1', trait['trait'])
+ self.assertEqual('trait1', trait.trait)
def _pre_upgrade_82c315d60161(self, engine):
# Create a node to which bios setting can be added.
@@ -706,9 +749,11 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.id
+ ).where(models.Node.uuid == data['uuid'])
node = connection.execute(node_stmt).first()
- data['id'] = node['id']
+ data['id'] = node.id
return data
def _check_82c315d60161(self, engine, data):
@@ -736,10 +781,12 @@ class MigrationCheckersMixin(object):
with engine.begin() as connection:
insert_bios_settings = bios_settings.insert().values(setting)
connection.execute(insert_bios_settings)
- setting_stmt = bios_settings.select(
- sqlalchemy.sql.and_(
- bios_settings.c.node_id == data['id'],
- bios_settings.c.name == setting['name']))
+ setting_stmt = sqlalchemy.select(
+ models.BIOSSetting.value
+ ).where(
+ models.BIOSSetting.node_id == data['id'],
+ models.BIOSSetting.name == setting['name']
+ )
setting = connection.execute(setting_stmt).first()
self.assertEqual('on', setting['value'])
@@ -826,15 +873,21 @@ class MigrationCheckersMixin(object):
self.assertIsInstance(tbl.c.conductor_group.type,
sqlalchemy.types.String)
with engine.begin() as connection:
- node_stmt = nodes_tbl.select(
- nodes_tbl.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.conductor_group,
+ ).where(
+ models.Node.uuid == data['node_uuid'])
node = connection.execute(node_stmt).first()
- self.assertEqual(node['conductor_group'], "")
+ self.assertEqual(node.conductor_group, "")
- conductor_stmt = conductors_tbl.select(
- conductors_tbl.c.id == data['conductor_id'])
+ conductor_stmt = sqlalchemy.select(
+ models.Conductor.conductor_group,
+ ).where(
+ models.Conductor.id == data['conductor_id'],
+ )
conductor = connection.execute(conductor_stmt).first()
- self.assertEqual(conductor['conductor_group'], "")
+ self.assertEqual(conductor.conductor_group, "")
def _check_d2b036ae9378(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -859,11 +912,15 @@ class MigrationCheckersMixin(object):
self.assertIn('protected_reason', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.uuid,
+ models.Node.protected,
+ models.Node.protected_reason
+ ).where(
+ models.Node.uuid == data['node_uuid'])
node = connection.execute(node_stmt).first()
- self.assertFalse(node['protected'])
- self.assertIsNone(node['protected_reason'])
+ self.assertFalse(node.protected)
+ self.assertIsNone(node.protected_reason)
def _check_f190f9d00a11(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -887,10 +944,13 @@ class MigrationCheckersMixin(object):
self.assertIn('allocation_id', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.allocation_id
+ ).where(
+ models.Node.uuid == data['node_uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertIsNone(node['allocation_id'])
+ self.assertIsNone(node.allocation_id)
allocations = db_utils.get_table(engine, 'allocations')
col_names = [column.name for column in allocations.c]
@@ -996,22 +1056,33 @@ class MigrationCheckersMixin(object):
insert_dpt = deploy_templates.insert().values(template)
connection.execute(insert_dpt)
# Query by UUID.
- dpt_uuid_stmt = deploy_templates.select(
- deploy_templates.c.uuid == uuid)
+ dpt_uuid_stmt = sqlalchemy.select(
+ models.DeployTemplate.id,
+ models.DeployTemplate.name,
+ ).where(
+ models.DeployTemplate.uuid == uuid
+ )
result = connection.execute(dpt_uuid_stmt).first()
- template_id = result['id']
- self.assertEqual(name, result['name'])
+ template_id = result.id
+ self.assertEqual(name, result.name)
# Query by name.
- dpt_name_stmt = deploy_templates.select(
- deploy_templates.c.name == name)
+ dpt_name_stmt = sqlalchemy.select(
+ models.DeployTemplate.id
+ ).where(
+ models.DeployTemplate.name == name
+ )
result = connection.execute(dpt_name_stmt).first()
- self.assertEqual(template_id, result['id'])
+ self.assertEqual(template_id, result.id)
# Query by ID.
- dpt_id_stmt = deploy_templates.select(
- deploy_templates.c.id == template_id)
+ dpt_id_stmt = sqlalchemy.select(
+ models.DeployTemplate.uuid,
+ models.DeployTemplate.name
+ ).where(
+ models.DeployTemplate.id == template_id
+ )
result = connection.execute(dpt_id_stmt).first()
- self.assertEqual(uuid, result['uuid'])
- self.assertEqual(name, result['name'])
+ self.assertEqual(uuid, result.uuid)
+ self.assertEqual(name, result.name)
savepoint_uuid = connection.begin_nested()
# UUID is unique.
template = {'name': 'CUSTOM_DT2', 'uuid': uuid}
@@ -1030,6 +1101,7 @@ class MigrationCheckersMixin(object):
# Insert a deploy template step.
interface = 'raid'
step_name = 'create_configuration'
+ # The line below is JSON.
args = '{"logical_disks": []}'
priority = 10
step = {'deploy_template_id': template_id, 'interface': interface,
@@ -1037,15 +1109,30 @@ class MigrationCheckersMixin(object):
insert_dpts = deploy_template_steps.insert().values(step)
connection.execute(insert_dpts)
# Query by deploy template ID.
- query_id_stmt = deploy_template_steps.select(
- deploy_template_steps.c.deploy_template_id
- == template_id)
+ query_id_stmt = sqlalchemy.select(
+ models.DeployTemplateStep.deploy_template_id,
+ models.DeployTemplateStep.interface,
+ models.DeployTemplateStep.step,
+ models.DeployTemplateStep.args,
+ models.DeployTemplateStep.priority,
+ ).where(
+ models.DeployTemplateStep.deploy_template_id == template_id
+ )
result = connection.execute(query_id_stmt).first()
- self.assertEqual(template_id, result['deploy_template_id'])
- self.assertEqual(interface, result['interface'])
- self.assertEqual(step_name, result['step'])
- self.assertEqual(args, result['args'])
- self.assertEqual(priority, result['priority'])
+ self.assertEqual(template_id, result.deploy_template_id)
+ self.assertEqual(interface, result.interface)
+ self.assertEqual(step_name, result.step)
+ if isinstance(result.args, dict):
+ # Postgres testing results in a dict being returned
+ # at this level which if you str() it, you get a dict,
+ # so comparing string to string fails.
+ result_args = json.dumps(result.args)
+ else:
+ # MySQL/MariaDB appears to actually hand us
+ # a string back, so we should be able to compare it.
+ result_args = result.args
+ self.assertEqual(args, result_args)
+ self.assertEqual(priority, result.priority)
# Insert another step for the same template.
insert_step = deploy_template_steps.insert().values(step)
connection.execute(insert_step)
@@ -1103,11 +1190,15 @@ class MigrationCheckersMixin(object):
self.assertIn('retired_reason', col_names)
with engine.begin() as connection:
- node_stmt = nodes.select(
- nodes.c.uuid == data['node_uuid'])
+ node_stmt = sqlalchemy.select(
+ models.Node.retired,
+ models.Node.retired_reason,
+ ).where(
+ models.Node.uuid == data['node_uuid']
+ )
node = connection.execute(node_stmt).first()
- self.assertFalse(node['retired'])
- self.assertIsNone(node['retired_reason'])
+ self.assertFalse(node.retired)
+ self.assertIsNone(node.retired_reason)
def _check_b2ad35726bb0(self, engine, data):
nodes = db_utils.get_table(engine, 'nodes')
@@ -1149,6 +1240,27 @@ class MigrationCheckersMixin(object):
self.assertIsInstance(node_history.c.user.type,
sqlalchemy.types.String)
+ def _check_0ac0f39bc5aa(self, engine, data):
+ node_inventory = db_utils.get_table(engine, 'node_inventory')
+ col_names = [column.name for column in node_inventory.c]
+
+ expected_names = ['version', 'created_at', 'updated_at', 'id',
+ 'node_id', 'inventory_data', 'plugin_data']
+ self.assertEqual(sorted(expected_names), sorted(col_names))
+
+ self.assertIsInstance(node_inventory.c.created_at.type,
+ sqlalchemy.types.DateTime)
+ self.assertIsInstance(node_inventory.c.updated_at.type,
+ sqlalchemy.types.DateTime)
+ self.assertIsInstance(node_inventory.c.id.type,
+ sqlalchemy.types.Integer)
+ self.assertIsInstance(node_inventory.c.node_id.type,
+ sqlalchemy.types.Integer)
+
+ def _check_4dbec778866e(self, engine, data):
+ nodes = db_utils.get_table(engine, 'nodes')
+ self.assertIsInstance(nodes.c.shard.type, sqlalchemy.types.String)
+
def test_upgrade_and_version(self):
with patch_with_engine(self.engine):
self.migration_api.upgrade('head')
@@ -1186,12 +1298,16 @@ class TestMigrationsMySQL(MigrationCheckersMixin,
# this should always fail pre-upgrade
mediumtext = 'a' * (pow(2, 16) + 1)
+ json_text = str({'key': mediumtext})
uuid = uuidutils.generate_uuid()
- expected_to_fail_data = {'uuid': uuid, 'instance_info': mediumtext}
+ expected_to_fail_data = {'uuid': uuid, 'instance_info': json_text}
# this should always work pre-upgrade
- text = 'a' * (pow(2, 16) - 1)
+ text = 'a' * (pow(2, 16) - 13)
+ # The field needs to contain JSON for the decoder to work against
+ # the field.
+ json_text = str({'key': text})
uuid2 = uuidutils.generate_uuid()
- valid_pre_upgrade_data = {'uuid': uuid2, 'instance_info': text}
+ valid_pre_upgrade_data = {'uuid': uuid2, 'instance_info': json_text}
with engine.begin() as connection:
self.assertRaises(db_exc.DBError, connection.execute,
nodes.insert(), expected_to_fail_data)
@@ -1207,21 +1323,36 @@ class TestMigrationsMySQL(MigrationCheckersMixin,
with engine.begin() as connection:
# check that the data for the successful pre-upgrade
# entry didn't change
- node_stmt = nodes.select(nodes.c.uuid == data['uuid'])
- node = connection.execute(node_stmt).first()
- self.assertIsNotNone(node)
- self.assertEqual(data['instance_info'], node['instance_info'])
+ # NOTE(TheJulia): Directly select the field to bypass
+ # field decoding
+ i_info = connection.execute(
+ sqlalchemy.text(
+ "SELECT instance_info from nodes WHERE uuid = "
+ "'%s'" % data['uuid'])).one()
+ self.assertIsNotNone(i_info[0])
+ self.assertEqual(data['instance_info'], i_info[0])
# now this should pass post-upgrade
test = 'b' * (pow(2, 16) + 1)
+ test_text = str({'a': test})
uuid = uuidutils.generate_uuid()
- data = {'uuid': uuid, 'instance_info': test}
+ data = {'uuid': uuid, 'instance_info': test_text}
insert_node = nodes.insert().values(data)
connection.execute(insert_node)
- node_stmt = nodes.select(nodes.c.uuid == uuid)
- node = connection.execute(node_stmt).first()
- self.assertEqual(test, node['instance_info'])
+ # Re-uses the same query to fetch current results
+ i_info = connection.execute(
+ sqlalchemy.text(
+ "SELECT instance_info from nodes WHERE uuid = "
+ "'%s'" % data['uuid'])).one()
+ self.assertEqual(test_text, i_info[0])
+
+ def _check_0ac0f39bc5aa(self, engine, data):
+ node_inventory = db_utils.get_table(engine, 'node_inventory')
+ self.assertIsInstance(node_inventory.c.inventory_data.type,
+ sqlalchemy.dialects.mysql.LONGTEXT)
+ self.assertIsInstance(node_inventory.c.plugin_data.type,
+ sqlalchemy.dialects.mysql.LONGTEXT)
class TestMigrationsPostgreSQL(MigrationCheckersMixin,
@@ -1230,6 +1361,13 @@ class TestMigrationsPostgreSQL(MigrationCheckersMixin,
test_base.BaseTestCase):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
+ def _check_0ac0f39bc5aa(self, engine, data):
+ node_inventory = db_utils.get_table(engine, 'node_inventory')
+ self.assertIsInstance(node_inventory.c.inventory_data.type,
+ sqlalchemy.types.Text)
+ self.assertIsInstance(node_inventory.c.plugin_data.type,
+ sqlalchemy.types.Text)
+
class ModelsMigrationSyncMixin(object):
diff --git a/ironic/tests/unit/db/test_conductor.py b/ironic/tests/unit/db/test_conductor.py
index fe4e93ed9..efc3a38a3 100644
--- a/ironic/tests/unit/db/test_conductor.py
+++ b/ironic/tests/unit/db/test_conductor.py
@@ -18,9 +18,6 @@
import datetime
from unittest import mock
-import oslo_db
-from oslo_db import exception as db_exc
-from oslo_db import sqlalchemy
from oslo_utils import timeutils
from ironic.common import exception
@@ -158,16 +155,6 @@ class DbConductorTestCase(base.DbTestCase):
c = self.dbapi.get_conductor(c.hostname)
self.assertEqual(test_time, timeutils.normalize_time(c.updated_at))
- @mock.patch.object(oslo_db.api.time, 'sleep', autospec=True)
- @mock.patch.object(sqlalchemy.orm.Query, 'update', autospec=True)
- def test_touch_conductor_deadlock(self, mock_update, mock_sleep):
- mock_sleep.return_value = None
- mock_update.side_effect = [db_exc.DBDeadlock(), None]
- c = self._create_test_cdr()
- self.dbapi.touch_conductor(c.hostname)
- self.assertEqual(2, mock_update.call_count)
- self.assertEqual(2, mock_sleep.call_count)
-
def test_touch_conductor_not_found(self):
# A conductor's heartbeat will not create a new record,
# it will only update existing ones
diff --git a/ironic/tests/unit/db/test_node_inventory.py b/ironic/tests/unit/db/test_node_inventory.py
new file mode 100644
index 000000000..8dc638f2b
--- /dev/null
+++ b/ironic/tests/unit/db/test_node_inventory.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic.common import exception
+from ironic.tests.unit.db import base
+from ironic.tests.unit.db import utils as db_utils
+
+
+class DBNodeInventoryTestCase(base.DbTestCase):
+
+ def setUp(self):
+ super(DBNodeInventoryTestCase, self).setUp()
+ self.node = db_utils.create_test_node()
+ self.inventory = db_utils.create_test_inventory(
+ id=0, node_id=self.node.id,
+ inventory_data={"inventory": "test_inventory"},
+ plugin_data={"plugin_data": "test_plugin_data"})
+
+ def test_destroy_node_inventory_by_node_id(self):
+ self.dbapi.destroy_node_inventory_by_node_id(self.inventory.node_id)
+ self.assertRaises(exception.NodeInventoryNotFound,
+ self.dbapi.get_node_inventory_by_node_id,
+ self.node.id)
+
+ def test_get_inventory_by_node_id(self):
+ res = self.dbapi.get_node_inventory_by_node_id(self.inventory.node_id)
+ self.assertEqual(self.inventory.id, res.id)
diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py
index eb5200f4e..e7053d6f5 100644
--- a/ironic/tests/unit/db/test_nodes.py
+++ b/ironic/tests/unit/db/test_nodes.py
@@ -367,10 +367,10 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
- res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
+ res = self.dbapi.get_node_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
- res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
+ res = self.dbapi.get_node_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
@@ -558,6 +558,9 @@ class DbNodeTestCase(base.DbTestCase):
'cat': 'meow'},
internal_info={'corgi': 'rocks'},
deploy_interface='purring_machine')
+ utils.create_test_node_traits(node_id=node.id,
+ traits=['atrait'])
+
uuids.append(str(node['uuid']))
req_fields = ['uuid',
'provision_state',
@@ -760,6 +763,15 @@ class DbNodeTestCase(base.DbTestCase):
self.assertRaises(exception.NodeHistoryNotFound,
self.dbapi.get_node_history_by_id, history.id)
+ def test_inventory_get_destroyed_after_destroying_a_node_by_uuid(self):
+ node = utils.create_test_node()
+
+ utils.create_test_inventory(node_id=node.id)
+
+ self.dbapi.destroy_node(node.uuid)
+ self.assertRaises(exception.NodeInventoryNotFound,
+ self.dbapi.get_node_inventory_by_node_id, node.id)
+
def test_update_node(self):
node = utils.create_test_node()
@@ -872,6 +884,53 @@ class DbNodeTestCase(base.DbTestCase):
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_started_at'])
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ def test_update_node_inspection_finished_at_inspecting(self, mock_utcnow):
+ mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
+ mock_utcnow.return_value = mocked_time
+ node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ inspection_finished_at=mocked_time,
+ provision_state=states.INSPECTING)
+ res = self.dbapi.update_node(node.id,
+ {'provision_state': states.MANAGEABLE})
+ result = res['inspection_finished_at']
+ self.assertEqual(mocked_time,
+ timeutils.normalize_time(result))
+ self.assertIsNone(res['inspection_started_at'])
+
+ @mock.patch.object(timeutils, 'utcnow', autospec=True)
+ def test_update_node_inspection_finished_at_inspectwait(self,
+ mock_utcnow):
+ mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
+ mock_utcnow.return_value = mocked_time
+ node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ inspection_finished_at=mocked_time,
+ provision_state=states.INSPECTWAIT)
+ res = self.dbapi.update_node(node.id,
+ {'provision_state': states.MANAGEABLE})
+ result = res['inspection_finished_at']
+ self.assertEqual(mocked_time,
+ timeutils.normalize_time(result))
+ self.assertIsNone(res['inspection_started_at'])
+
+ def test_update_node_inspection_started_at_inspecting(self):
+ mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
+ node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ inspection_started_at=mocked_time,
+ provision_state=states.INSPECTING)
+ res = self.dbapi.update_node(node.id,
+ {'provision_state': states.INSPECTFAIL})
+ self.assertIsNone(res['inspection_started_at'])
+
+ def test_update_node_inspection_started_at_inspectwait(self):
+ mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
+ node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ inspection_started_at=mocked_time,
+ provision_state=states.INSPECTWAIT)
+ res = self.dbapi.update_node(node.id,
+ {'provision_state': states.INSPECTFAIL})
+ self.assertIsNone(res['inspection_started_at'])
+
def test_reserve_node(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
@@ -1081,3 +1140,39 @@ class DbNodeTestCase(base.DbTestCase):
self.dbapi.check_node_list,
[node1.uuid, 'this/cannot/be/a/name'])
self.assertIn('this/cannot/be/a/name', str(exc))
+
+ def test_node_provision_state_count(self):
+ active_nodes = 5
+ manageable_nodes = 3
+ deploywait_nodes = 1
+ for i in range(0, active_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.ACTIVE)
+ for i in range(0, manageable_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.MANAGEABLE)
+ for i in range(0, deploywait_nodes):
+ utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ provision_state=states.DEPLOYWAIT)
+
+ self.assertEqual(
+ active_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.ACTIVE)
+ )
+ self.assertEqual(
+ manageable_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.MANAGEABLE)
+ )
+ self.assertEqual(
+ deploywait_nodes,
+ self.dbapi.count_nodes_in_provision_state(states.DEPLOYWAIT)
+ )
+ total = active_nodes + manageable_nodes + deploywait_nodes
+ self.assertEqual(
+ total,
+ self.dbapi.count_nodes_in_provision_state([
+ states.ACTIVE,
+ states.MANAGEABLE,
+ states.DEPLOYWAIT
+ ])
+ )
diff --git a/ironic/tests/unit/db/test_ports.py b/ironic/tests/unit/db/test_ports.py
index 97d1e98a7..0284ee0d0 100644
--- a/ironic/tests/unit/db/test_ports.py
+++ b/ironic/tests/unit/db/test_ports.py
@@ -22,6 +22,23 @@ from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils as db_utils
+def _create_test_port_with_shard(shard, address):
+ node = db_utils.create_test_node(
+ uuid=uuidutils.generate_uuid(),
+ owner='12345', lessee='54321', shard=shard)
+ pg = db_utils.create_test_portgroup(
+ name='pg-%s' % shard,
+ uuid=uuidutils.generate_uuid(),
+ node_id=node.id,
+ address=address)
+ return db_utils.create_test_port(
+ name='port-%s' % shard,
+ uuid=uuidutils.generate_uuid(),
+ node_id=node.id,
+ address=address,
+ portgroup_id=pg.id)
+
+
class DbPortTestCase(base.DbTestCase):
def setUp(self):
@@ -202,6 +219,28 @@ class DbPortTestCase(base.DbTestCase):
def test_get_ports_by_portgroup_id_that_does_not_exist(self):
self.assertEqual([], self.dbapi.get_ports_by_portgroup_id(99))
+ def test_get_ports_by_shard_no_match(self):
+ res = self.dbapi.get_ports_by_shards(['shard1', 'shard2'])
+ self.assertEqual([], res)
+
+ def test_get_ports_by_shard_with_match_single(self):
+ _create_test_port_with_shard('shard1', 'aa:bb:cc:dd:ee:ff')
+
+ res = self.dbapi.get_ports_by_shards(['shard1'])
+ self.assertEqual(1, len(res))
+ self.assertEqual('port-shard1', res[0].name)
+
+ def test_get_ports_by_shard_with_match_multi(self):
+ _create_test_port_with_shard('shard1', 'aa:bb:cc:dd:ee:ff')
+ _create_test_port_with_shard('shard2', 'ab:bb:cc:dd:ee:ff')
+ _create_test_port_with_shard('shard3', 'ac:bb:cc:dd:ee:ff')
+
+ res = self.dbapi.get_ports_by_shards(['shard1', 'shard2'])
+ self.assertEqual(2, len(res))
+ # NOTE(JayF): We do not query for shard3; ensure we don't get it.
+ self.assertNotEqual('port-shard3', res[0].name)
+ self.assertNotEqual('port-shard3', res[1].name)
+
def test_destroy_port(self):
self.dbapi.destroy_port(self.port.id)
self.assertRaises(exception.PortNotFound,
diff --git a/ironic/tests/unit/db/test_shard.py b/ironic/tests/unit/db/test_shard.py
new file mode 100644
index 000000000..b4b7b129f
--- /dev/null
+++ b/ironic/tests/unit/db/test_shard.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for fetching shards via the DB API"""
+import uuid
+
+from oslo_db.sqlalchemy import enginefacade
+
+from ironic.tests.unit.db import base
+from ironic.tests.unit.db import utils
+
+
+class ShardTestCase(base.DbTestCase):
+ def setUp(self):
+ super(ShardTestCase, self).setUp()
+ self.engine = enginefacade.writer.get_engine()
+
+ def test_get_shard_list(self):
+ """Validate shard list is returned, and with correct sorting."""
+ for i in range(1, 2):
+ utils.create_test_node(uuid=str(uuid.uuid4()))
+ for i in range(1, 3):
+ utils.create_test_node(uuid=str(uuid.uuid4()), shard="shard1")
+ for i in range(1, 4):
+ utils.create_test_node(uuid=str(uuid.uuid4()), shard="shard2")
+
+ res = self.dbapi.get_shard_list()
+ self.assertEqual(res, [
+ {"name": "shard2", "count": 3},
+ {"name": "shard1", "count": 2},
+ {"name": "None", "count": 1},
+ ])
+
+ def test_get_shard_empty_list(self):
+ """Validate empty list is returned if no assigned shards."""
+ res = self.dbapi.get_shard_list()
+ self.assertEqual(res, [])
diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py
index e2eae83cc..10055d829 100644
--- a/ironic/tests/unit/db/utils.py
+++ b/ironic/tests/unit/db/utils.py
@@ -28,6 +28,7 @@ from ironic.objects import conductor
from ironic.objects import deploy_template
from ironic.objects import node
from ironic.objects import node_history
+from ironic.objects import node_inventory
from ironic.objects import port
from ironic.objects import portgroup
from ironic.objects import trait
@@ -236,6 +237,7 @@ def get_test_node(**kw):
'network_data': kw.get('network_data'),
'boot_mode': kw.get('boot_mode', None),
'secure_boot': kw.get('secure_boot', None),
+ 'shard': kw.get('shard', None)
}
for iface in drivers_base.ALL_INTERFACES:
@@ -271,6 +273,8 @@ def get_test_port(**kw):
'version': kw.get('version', port.Port.VERSION),
'uuid': kw.get('uuid', '1be26c0b-03f2-4d2e-ae87-c02d7f33c781'),
'node_id': kw.get('node_id', 123),
+ 'node_uuid': kw.get('node_uuid',
+ '59d102f7-5840-4299-8ec8-80c0ebae9de1'),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
@@ -445,6 +449,8 @@ def get_test_portgroup(**kw):
'uuid': kw.get('uuid', '6eb02b44-18a3-4659-8c0b-8d2802581ae4'),
'name': kw.get('name', 'fooname'),
'node_id': kw.get('node_id', 123),
+ 'node_uuid': kw.get('node_uuid',
+ '40481b96-306b-4a33-901f-795a3dc2f397'),
'address': kw.get('address', '52:54:00:cf:2d:31'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
@@ -721,3 +727,29 @@ def create_test_history(**kw):
del history['id']
dbapi = db_api.get_instance()
return dbapi.create_node_history(history)
+
+
+def get_test_inventory(**kw):
+ return {
+ 'id': kw.get('id', 345),
+ 'version': kw.get('version', node_inventory.NodeInventory.VERSION),
+ 'node_id': kw.get('node_id', 123),
+ 'inventory_data': kw.get('inventory', {"inventory": "test"}),
+ 'plugin_data': kw.get('plugin_data', {"pdata": {"plugin": "data"}}),
+ 'created_at': kw.get('created_at'),
+ 'updated_at': kw.get('updated_at'),
+ }
+
+
+def create_test_inventory(**kw):
+ """Create test inventory entry in DB and return NodeInventory DB object.
+
+    :param kw: kwargs with overriding values for inventory's attributes.
+ :returns: Test NodeInventory DB object.
+ """
+ inventory = get_test_inventory(**kw)
+ # Let DB generate ID if it isn't specified explicitly
+ if 'id' not in kw:
+ del inventory['id']
+ dbapi = db_api.get_instance()
+ return dbapi.create_node_inventory(inventory)
diff --git a/ironic/tests/unit/dhcp/test_dnsmasq.py b/ironic/tests/unit/dhcp/test_dnsmasq.py
new file mode 100644
index 000000000..64fe46f33
--- /dev/null
+++ b/ironic/tests/unit/dhcp/test_dnsmasq.py
@@ -0,0 +1,140 @@
+#
+# Copyright 2022 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+
+from ironic.common import dhcp_factory
+from ironic.common import utils as common_utils
+from ironic.conductor import task_manager
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as object_utils
+
+
+class TestDnsmasqDHCPApi(db_base.DbTestCase):
+
+ def setUp(self):
+ super(TestDnsmasqDHCPApi, self).setUp()
+ self.config(dhcp_provider='dnsmasq',
+ group='dhcp')
+ self.node = object_utils.create_test_node(self.context)
+
+ self.ports = [
+ object_utils.create_test_port(
+ self.context, node_id=self.node.id, id=2,
+ uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c782',
+ address='52:54:00:cf:2d:32',
+ pxe_enabled=True)]
+
+ self.optsdir = tempfile.mkdtemp()
+ self.addCleanup(lambda: common_utils.rmtree_without_raise(
+ self.optsdir))
+ self.config(dhcp_optsdir=self.optsdir, group='dnsmasq')
+
+ self.hostsdir = tempfile.mkdtemp()
+ self.addCleanup(lambda: common_utils.rmtree_without_raise(
+ self.hostsdir))
+ self.config(dhcp_hostsdir=self.hostsdir, group='dnsmasq')
+
+ dhcp_factory.DHCPFactory._dhcp_provider = None
+ self.api = dhcp_factory.DHCPFactory()
+ self.opts = [
+ {
+ 'ip_version': 4,
+ 'opt_name': '67',
+ 'opt_value': 'bootx64.efi'
+ },
+ {
+ 'ip_version': 4,
+ 'opt_name': '210',
+ 'opt_value': '/tftpboot/'
+ },
+ {
+ 'ip_version': 4,
+ 'opt_name': '66',
+ 'opt_value': '192.0.2.135',
+ },
+ {
+ 'ip_version': 4,
+ 'opt_name': '150',
+ 'opt_value': '192.0.2.135'
+ },
+ {
+ 'ip_version': 4,
+ 'opt_name': '255',
+ 'opt_value': '192.0.2.135'
+ }
+ ]
+
+ def test_update_dhcp(self):
+ with task_manager.acquire(self.context,
+ self.node.uuid) as task:
+ self.api.update_dhcp(task, self.opts)
+
+ dnsmasq_tag = task.node.driver_internal_info.get('dnsmasq_tag')
+ self.assertEqual(36, len(dnsmasq_tag))
+
+ hostfile = os.path.join(self.hostsdir,
+ 'ironic-52:54:00:cf:2d:32.conf')
+ with open(hostfile, 'r') as f:
+ self.assertEqual(
+ '52:54:00:cf:2d:32,set:%s,set:ironic\n' % dnsmasq_tag,
+ f.readline())
+
+ optsfile = os.path.join(self.optsdir,
+ 'ironic-%s.conf' % self.node.uuid)
+ with open(optsfile, 'r') as f:
+ self.assertEqual([
+ 'tag:%s,67,bootx64.efi\n' % dnsmasq_tag,
+ 'tag:%s,210,/tftpboot/\n' % dnsmasq_tag,
+ 'tag:%s,66,192.0.2.135\n' % dnsmasq_tag,
+ 'tag:%s,150,192.0.2.135\n' % dnsmasq_tag,
+ 'tag:%s,255,192.0.2.135\n' % dnsmasq_tag],
+ f.readlines())
+
+ def test_get_ip_addresses(self):
+ with task_manager.acquire(self.context,
+ self.node.uuid) as task:
+ with tempfile.NamedTemporaryFile() as fp:
+ self.config(dhcp_leasefile=fp.name, group='dnsmasq')
+ fp.write(b"1659975057 52:54:00:cf:2d:32 192.0.2.198 * *\n")
+ fp.flush()
+ self.assertEqual(
+ ['192.0.2.198'],
+ self.api.provider.get_ip_addresses(task))
+
+ def test_clean_dhcp_opts(self):
+ with task_manager.acquire(self.context,
+ self.node.uuid) as task:
+ self.api.update_dhcp(task, self.opts)
+
+ hostfile = os.path.join(self.hostsdir,
+ 'ironic-52:54:00:cf:2d:32.conf')
+ optsfile = os.path.join(self.optsdir,
+ 'ironic-%s.conf' % self.node.uuid)
+ self.assertTrue(os.path.isfile(hostfile))
+ self.assertTrue(os.path.isfile(optsfile))
+
+ with task_manager.acquire(self.context,
+ self.node.uuid) as task:
+ self.api.clean_dhcp(task)
+
+ # assert the host file remains with the ignore directive, and the opts
+ # file is deleted
+ with open(hostfile, 'r') as f:
+ self.assertEqual(
+ '52:54:00:cf:2d:32,ignore\n',
+ f.readline())
+ self.assertFalse(os.path.isfile(optsfile))
diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py
index 780d2893c..acbd009d3 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py
@@ -2457,13 +2457,145 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
self.assertEqual(False, result)
mock_log.assert_called_once()
+ @mock.patch.object(deploy_utils, 'reboot_to_finish_step',
+ autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode(
+ self, mock_get_system, mock_reboot):
+ mock_task_mon = mock.Mock(task_monitor_uri='/TaskService/1')
+ mock_oem_controller = mock.Mock()
+ mock_oem_controller.convert_to_raid.return_value = mock_task_mon
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.return_value = mock_oem_controller
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertEqual(
+ ['/TaskService/1'],
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertEqual(mock_reboot.return_value, result)
+
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_no_conversion(
+ self, mock_get_system):
+ mock_oem_controller = mock.Mock()
+ mock_oem_controller.convert_to_raid.return_value = None
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.return_value = mock_oem_controller
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_not_raid(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(storage_controllers=None)
+ mock_controllers = mock.PropertyMock(
+ side_effect=sushy.exceptions.MissingAttributeError)
+ type(mock_storage).controllers = mock_controllers
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_not_called()
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_idrac(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(storage_controllers=mock.Mock())
+ mock_controllers = mock.PropertyMock(
+ side_effect=sushy.exceptions.MissingAttributeError)
+ type(mock_storage).controllers = mock_controllers
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_called_once()
+
+ @mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_sushy(
+ self, mock_get_system, mock_log):
+ mock_storage = mock.Mock(spec=[])
+ mock_storage.identity = "Storage 1"
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+ mock_log.assert_called_once()
+
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test__convert_controller_to_raid_mode_old_sushy_oem(
+ self, mock_get_system):
+ mock_controller = mock.Mock()
+ mock_controller.get_oem_extension.side_effect =\
+ sushy.exceptions.ExtensionError
+ mock_controllers_col = mock.Mock()
+ mock_controllers_col.get_members.return_value = [mock_controller]
+ mock_storage = mock.Mock(controllers=mock_controllers_col)
+ mock_storage_col = mock.Mock()
+ mock_storage_col.get_members.return_value = [mock_storage]
+ mock_system = mock.Mock(storage=mock_storage_col)
+ mock_get_system.return_value = mock_system
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ result = self.raid._convert_controller_to_raid_mode(task)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(result)
+
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
@mock.patch.object(deploy_utils, 'get_async_step_return_state',
autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_post_delete_configuration_foreign_async(
self, mock_get_system, mock_build_agent_options,
- mock_get_async_step_return_state):
+ mock_get_async_step_return_state, mock_convert):
fake_oem_system = mock.Mock()
fake_system = mock.Mock()
fake_system.get_oem_extension.return_value = fake_oem_system
@@ -2497,9 +2629,13 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
mock_get_async_step_return_state.assert_called_once_with(task.node)
mock_task_mon1.wait.assert_not_called()
mock_task_mon2.wait.assert_not_called()
+ mock_convert.assert_not_called()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
- def test_post_delete_configuration_foreign_sync(self, mock_get_system):
+ def test_post_delete_configuration_foreign_sync(
+ self, mock_get_system, mock_convert):
fake_oem_system = mock.Mock()
fake_system = mock.Mock()
fake_system.get_oem_extension.return_value = fake_oem_system
@@ -2520,15 +2656,34 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
mock_task_mon2.get_task.return_value = mock_task2
fake_oem_system.clear_foreign_config.return_value = [
mock_task_mon1, mock_task_mon2]
+ mock_convert_state = mock.Mock()
+ mock_convert.return_value = mock_convert_state
result = self.raid.post_delete_configuration(
task, None, return_state=mock_return_state1)
- self.assertEqual(result, mock_return_state1)
+ self.assertEqual(result, mock_convert_state)
fake_oem_system.clear_foreign_config.assert_called_once()
mock_task_mon1.wait.assert_called_once_with(CONF.drac.raid_job_timeout)
mock_task_mon2.wait.assert_not_called()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_clear_foreign_config', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_post_delete_configuration_no_subtasks(
+ self, mock_get_system, mock_foreign, mock_convert):
+ mock_foreign.return_value = False
+ mock_convert.return_value = None
+ task = mock.Mock(node=self.node, context=self.context)
+ mock_return_state1 = mock.Mock()
+
+ result = self.raid.post_delete_configuration(
+ task, None, return_state=mock_return_state1)
+
+ self.assertEqual(mock_return_state1, result)
+
@mock.patch.object(drac_raid.LOG, 'warning', autospec=True)
def test__clear_foreign_config_attribute_error(self, mock_log):
fake_oem_system = mock.Mock(spec=[])
@@ -2682,6 +2837,41 @@ class DracRedfishRAIDTestCase(test_utils.BaseDracTest):
task.node.driver_internal_info.get('raid_task_monitor_uris'))
self.raid._set_failed.assert_called_once()
+ @mock.patch.object(drac_raid.DracRedfishRAID,
+ '_convert_controller_to_raid_mode', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_task_monitor', autospec=True)
+ def test__check_raid_tasks_status_convert_controller(
+ self, mock_get_task_monitor, mock_convert):
+ driver_internal_info = {
+ 'raid_task_monitor_uris': '/TaskService/1',
+ 'raid_config_substep': 'clear_foreign_config'}
+ self.node.driver_internal_info = driver_internal_info
+ self.node.save()
+
+ mock_config_task = mock.Mock()
+ mock_config_task.task_state = sushy.TASK_STATE_COMPLETED
+ mock_config_task.task_status = sushy.HEALTH_OK
+ mock_task_monitor = mock.Mock()
+ mock_task_monitor.is_processing = False
+ mock_task_monitor.get_task.return_value = mock_config_task
+ mock_get_task_monitor.return_value = mock_task_monitor
+
+ self.raid._set_success = mock.Mock()
+ self.raid._set_failed = mock.Mock()
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.raid._check_raid_tasks_status(
+ task, ['/TaskService/1'])
+
+ mock_convert.assert_called_once_with(task)
+ self.raid._set_success.assert_not_called()
+ self.raid._set_failed.assert_not_called()
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_task_monitor_uris'))
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_config_substep'))
+
@mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
autospec=True)
@mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py
index 352eb0837..c3e22453f 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_common.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
@@ -30,6 +31,7 @@ from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import exception
+from ironic.common import image_service
from ironic.common import images
from ironic.common import swift
from ironic.conductor import task_manager
@@ -374,6 +376,22 @@ class IloCommonMethodsTestCase(BaseIloTest):
expected_info = dict(self.info, **ipmi_info)
self.assertEqual(expected_info, actual_info)
+ def test_update_redfish_properties(self):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ redfish_info = {
+ "redfish_address": "1.2.3.4",
+ "redfish_username": "admin",
+ "redfish_password": "fake",
+ "redfish_verify_ca": None,
+ "redfish_system_id": "/redfish/v1/Systems/1"
+ }
+ task.node.driver_info = self.info
+ ilo_common.update_redfish_properties(task)
+ actual_info = task.node.driver_info
+ expected_info = dict(self.info, **redfish_info)
+ self.assertEqual(expected_info, actual_info)
+
def test__get_floppy_image_name(self):
image_name_expected = 'image-' + self.node.uuid
image_name_actual = ilo_common._get_floppy_image_name(self.node)
@@ -1504,3 +1522,37 @@ class IloCommonMethodsTestCase(BaseIloTest):
self.assertRaises(exception.IloOperationError,
ilo_common.setup_uefi_https,
task, iso, True)
+
+ @mock.patch.object(image_service, 'FileImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(builtins, 'open', autospec=True)
+ def test_download_file_url(self, open_mock, http_mock, file_mock):
+ url = "file:///test1/iLO.crt"
+ target_file = "/a/b/c"
+ fd_mock = mock.MagicMock(spec=io.BytesIO)
+ open_mock.return_value = fd_mock
+ fd_mock.__enter__.return_value = fd_mock
+ ilo_common.download(target_file, url)
+ open_mock.assert_called_once_with(target_file, 'wb')
+ http_mock.assert_not_called()
+ file_mock.return_value.download.assert_called_once_with(
+ "/test1/iLO.crt", fd_mock)
+
+ @mock.patch.object(image_service, 'FileImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(image_service, 'HttpImageService', spec_set=True,
+ autospec=True)
+ @mock.patch.object(builtins, 'open', autospec=True)
+ def test_download_http_url(self, open_mock, http_mock, file_mock):
+ url = "http://1.1.1.1/iLO.crt"
+ target_file = "/a/b/c"
+ fd_mock = mock.MagicMock(spec=io.BytesIO)
+ open_mock.return_value = fd_mock
+ fd_mock.__enter__.return_value = fd_mock
+ ilo_common.download(target_file, url)
+ http_mock.return_value.download.assert_called_once_with(
+ "http://1.1.1.1/iLO.crt", fd_mock)
+ file_mock.assert_not_called()
+ open_mock.assert_called_once_with(target_file, 'wb')
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_management.py b/ironic/tests/unit/drivers/modules/ilo/test_management.py
index e4d891c3d..f087c4d58 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_management.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_management.py
@@ -14,9 +14,12 @@
"""Test class for Management Interface used by iLO modules."""
+import os
+import shutil
from unittest import mock
import ddt
+from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
@@ -42,6 +45,8 @@ ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
+CONF = cfg.CONF
+
@ddt.ddt
class IloManagementTestCase(test_common.BaseIloTest):
@@ -424,6 +429,116 @@ class IloManagementTestCase(test_common.BaseIloTest):
step_mock.assert_called_once_with(
task.node, 'update_authentication_failure_logging', '1', False)
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ def test_create_csr(self, os_mock, step_mock):
+ csr_params_args = {
+ "City": "Bangalore",
+ "CommonName": "1.1.1.1",
+ "Country": "ABC",
+ "OrgName": "DEF",
+ "State": "IJK"
+ }
+ csr_args = {
+ "csr_params": csr_params_args}
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.management.create_csr(task, **csr_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ step_mock.assert_called_once_with(task.node, 'create_csr',
+ cert_path, csr_params_args)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ def test_add_https_certificate(self, shutil_mock, os_mock,
+ step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': '/test1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ filename = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', filename)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_called_once_with('/test1/cert', filename)
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_fileurl(self, download_mock, shutil_mock,
+ os_mock, step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'file:///test1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ fname = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', fname)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_called_once_with(fname, 'file:///test1/cert')
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_httpurl(self, download_mock, shutil_mock,
+ os_mock, step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'http://1.1.1.1/cert'}
+ task.driver.management.add_https_certificate(
+ task, **cert_file_args)
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ cert_path_name = os.path.join(cert_path, self.node.uuid)
+ fname = cert_path_name + ".crt"
+ step_mock.assert_called_once_with(
+ task.node, 'add_https_certificate', fname)
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_called_once_with(fname, 'http://1.1.1.1/cert')
+
+ @mock.patch.object(ilo_management, '_execute_ilo_step',
+ spec_set=True, autospec=True)
+ @mock.patch.object(os, 'makedirs', spec_set=True, autospec=True)
+ @mock.patch.object(shutil, 'copy', spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'download', spec_set=True, autospec=True)
+ def test_add_https_certificate_url_exception(self, download_mock,
+ shutil_mock, os_mock,
+ step_mock):
+ CONF.ilo.cert_path = "/var/lib/ironic/ilo"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ cert_file_args = {'cert_file': 'swift://1.1.1.1/cert'}
+ self.assertRaises(exception.IloOperationNotSupported,
+ task.driver.management.add_https_certificate,
+ task,
+ **cert_file_args)
+
+ cert_path = os.path.join(CONF.ilo.cert_path, self.node.uuid)
+ step_mock.assert_not_called()
+ os_mock.assert_called_once_with(cert_path, 0o755)
+ shutil_mock.assert_not_called()
+ download_mock.assert_not_called()
+
@mock.patch.object(deploy_utils, 'build_agent_options',
spec_set=True, autospec=True)
@mock.patch.object(ilo_boot.IloVirtualMediaBoot, 'clean_up_ramdisk',
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
index f3114826e..b7bc3cbce 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
@@ -1,3 +1,4 @@
+# Copyright 2022 Hewlett Packard Enterprise Development LP
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
@@ -30,6 +31,7 @@ from ironic.tests.unit.drivers.modules.ilo import test_common
class VendorPassthruTestCase(test_common.BaseIloTest):
boot_interface = 'ilo-virtual-media'
+ vendor_interface = 'ilo'
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@@ -95,3 +97,72 @@ class VendorPassthruTestCase(test_common.BaseIloTest):
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, 'foo')
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test__validate_is_it_a_supported_system(
+ self, get_ilo_object_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.maintenance = True
+ ilo_mock_object = get_ilo_object_mock.return_value
+ ilo_mock_object.get_product_name.return_value = (
+ 'ProLiant DL380 Gen10')
+ task.driver.vendor._validate_is_it_a_supported_system(task)
+ get_ilo_object_mock.assert_called_once_with(task.node)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test__validate_is_it_a_supported_system_exception(
+ self, get_ilo_object_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.maintenance = True
+ ilo_mock_object = get_ilo_object_mock.return_value
+ ilo_mock_object.get_product_name.return_value = (
+ 'ProLiant DL380 Gen8')
+ self.assertRaises(
+ exception.IloOperationNotSupported,
+ task.driver.vendor._validate_is_it_a_supported_system, task)
+
+ @mock.patch.object(ilo_common, 'parse_driver_info',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_common, 'update_redfish_properties',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ilo_vendor.VendorPassthru,
+ '_validate_is_it_a_supported_system',
+ spec_set=True, autospec=True)
+ def test_validate_create_subscription(self, validate_redfish_system_mock,
+ redfish_properties_mock,
+ driver_info_mock):
+ self.node.vendor_interface = 'ilo'
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ d_info = {'ilo_address': '1.1.1.1',
+ 'ilo_username': 'user',
+ 'ilo_password': 'password',
+ 'ilo_verify_ca': False}
+ driver_info_mock.return_value = d_info
+ redfish_properties = {'redfish_address': '1.1.1.1',
+ 'redfish_username': 'user',
+ 'redfish_password': 'password',
+ 'redfish_system_id': '/redfish/v1/Systems/1',
+ 'redfish_verify_ca': False}
+ redfish_properties_mock.return_value = redfish_properties
+ kwargs = {'Destination': 'https://someulr',
+ 'Context': 'MyProtocol'}
+ task.driver.vendor.validate(task, 'create_subscription', **kwargs)
+ driver_info_mock.assert_called_once_with(task.node)
+ redfish_properties_mock.assert_called_once_with(task)
+ validate_redfish_system_mock.assert_called_once_with(
+ task.driver.vendor, task)
+
+ def test_validate_operation_exeption(self):
+ self.node.vendor_interface = 'ilo'
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(
+ exception.IloOperationNotSupported,
+ task.driver.vendor.validate, task, 'eject_vmedia')
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_common.py b/ironic/tests/unit/drivers/modules/irmc/test_common.py
index 9dbb380ba..f125d7bd5 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_common.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_common.py
@@ -412,3 +412,132 @@ class IRMCCommonMethodsTestCase(BaseIRMCTest):
info = irmc_common.parse_driver_info(task.node)
mock_elcm.set_secure_boot_mode.assert_called_once_with(
info, True)
+
+ @mock.patch.object(irmc_common, 'elcm',
+ spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
+ def test_check_elcm_license_success_with_200(self, elcm_mock):
+ elcm_req_mock = elcm_mock.elcm_request
+ json_data = ('{ "eLCMStatus" : { "EnabledAndLicenced" : "true" , '
+ '"SDCardMounted" : "false" } }')
+ func_return_value = {'active': True, 'status_code': 200}
+ response_mock = elcm_req_mock.return_value
+ response_mock.status_code = 200
+ response_mock.text = json_data
+ self.assertEqual(irmc_common.check_elcm_license(self.node),
+ func_return_value)
+
+ @mock.patch.object(irmc_common, 'elcm',
+ spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
+ def test_check_elcm_license_success_with_500(self, elcm_mock):
+ elcm_req_mock = elcm_mock.elcm_request
+ json_data = ''
+ func_return_value = {'active': False, 'status_code': 500}
+ response_mock = elcm_req_mock.return_value
+ response_mock.status_code = 500
+ response_mock.text = json_data
+ self.assertEqual(irmc_common.check_elcm_license(self.node),
+ func_return_value)
+
+ @mock.patch.object(irmc_common, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ @mock.patch.object(irmc_common, 'elcm',
+ spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
+ def test_check_elcm_license_fail_invalid_json(self, elcm_mock, scci_mock):
+ scci_mock.SCCIError = Exception
+ elcm_req_mock = elcm_mock.elcm_request
+ json_data = ''
+ response_mock = elcm_req_mock.return_value
+ response_mock.status_code = 200
+ response_mock.text = json_data
+ self.assertRaises(exception.IRMCOperationError,
+ irmc_common.check_elcm_license, self.node)
+
+ @mock.patch.object(irmc_common, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ @mock.patch.object(irmc_common, 'elcm',
+ spec_set=mock_specs.SCCICLIENT_IRMC_ELCM_SPEC)
+ def test_check_elcm_license_fail_elcm_error(self, elcm_mock, scci_mock):
+ scci_mock.SCCIError = Exception
+ elcm_req_mock = elcm_mock.elcm_request
+ elcm_req_mock.side_effect = scci_mock.SCCIError
+ self.assertRaises(exception.IRMCOperationError,
+ irmc_common.check_elcm_license, self.node)
+
+ @mock.patch.object(irmc_common, 'get_irmc_report', autospec=True)
+ @mock.patch.object(irmc_common, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ def test_set_irmc_version_success(self, scci_mock, get_report_mock):
+ version_str = 'iRMC S6/2.00'
+ scci_mock.get_irmc_version_str.return_value = version_str.split('/')
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ irmc_common.set_irmc_version(task)
+ self.assertEqual(version_str,
+ task.node.driver_internal_info['irmc_fw_version'])
+
+ @mock.patch.object(irmc_common, 'get_irmc_report', autospec=True)
+ @mock.patch.object(irmc_common, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ def test_set_irmc_version_fail(self, scci_mock, get_report_mock):
+ scci_mock.SCCIError = Exception
+ get_report_mock.side_effect = scci_mock.SCCIError
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaises(exception.IRMCOperationError,
+ irmc_common.set_irmc_version, task)
+
+ def test_within_version_ranges_success(self):
+ self.node.set_driver_internal_info('irmc_fw_version', 'iRMC S6/2.00')
+ ver_range_list = [
+ {'4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': '2.01'}
+ },
+ {'4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': None}
+ },
+ {'4': {'upper': '1.05'},
+ '6': {'min': '1.95'}
+ },
+ {'4': {'upper': '1.05'},
+ '6': {}
+ },
+ {'4': {'upper': '1.05'},
+ '6': None
+ }]
+ for range_dict in ver_range_list:
+ with self.subTest():
+ self.assertTrue(irmc_common.within_version_ranges(self.node,
+ range_dict))
+
+ def test_within_version_ranges_success_out_range(self):
+ self.node.set_driver_internal_info('irmc_fw_version', 'iRMC S6/2.00')
+ ver_range_list = [
+ {'4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': '2.00'}
+ },
+ {'4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': '1.99'}
+ },
+ {'4': {'upper': '1.05'},
+ }]
+ for range_dict in ver_range_list:
+ with self.subTest():
+ self.assertFalse(irmc_common.within_version_ranges(self.node,
+ range_dict))
+
+ def test_within_version_ranges_fail_no_match(self):
+ self.node.set_driver_internal_info('irmc_fw_version', 'ver/2.00')
+ ver_range = {
+ '4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': '2.01'}
+ }
+ self.assertFalse(irmc_common.within_version_ranges(self.node,
+ ver_range))
+
+ def test_within_version_ranges_fail_no_version_set(self):
+ ver_range = {
+ '4': {'upper': '1.05'},
+ '6': {'min': '1.95', 'upper': '2.01'}
+ }
+ self.assertFalse(irmc_common.within_version_ranges(self.node,
+ ver_range))
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
index 5c66cb96a..2cec2429f 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
@@ -64,13 +64,16 @@ class IRMCInspectInternalMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_inspect, '_get_mac_addresses', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_inspect, 'scci',
+ @mock.patch.object(irmc_inspect.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
- def test__inspect_hardware(
+ def test__inspect_hardware_ipmi(
self, get_irmc_report_mock, scci_mock, _get_mac_addresses_mock):
# Set config flags
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
self.config(gpu_ids=gpu_ids, group='irmc')
@@ -117,9 +120,68 @@ class IRMCInspectInternalMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual((expected_props, inspected_macs, new_traits),
result)
+ @mock.patch.object(
+ irmc_inspect, '_get_capabilities_properties_without_ipmi',
+ autospec=True)
+ @mock.patch.object(irmc_inspect, '_get_mac_addresses', spec_set=True,
+ autospec=True)
+ @mock.patch.object(irmc_inspect.irmc, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
+ autospec=True)
+ def test__inspect_hardware_redfish(
+ self, get_irmc_report_mock, scci_mock, _get_mac_addresses_mock,
+ _get_cap_prop_without_ipmi_mock):
+ # Set config flags
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+
+ kwargs = {'sleep_flag': False}
+
+ parsed_info = irmc_common.parse_driver_info(self.node)
+ inspected_props = {
+ 'memory_mb': '1024',
+ 'local_gb': 10,
+ 'cpus': 2,
+ 'cpu_arch': 'x86_64'}
+ inspected_capabilities = {
+ 'irmc_firmware_version': 'iRMC S6-2.00S',
+ 'server_model': 'TX2540M1F5',
+ 'rom_firmware_version': 'V4.6.5.4 R1.15.0 for D3099-B1x'}
+ formatted_caps = utils.get_updated_capabilities(
+ '', inspected_capabilities)
+ existing_traits = ['EXISTING_TRAIT']
+ passed_cap_prop = {'irmc_firmware_version',
+ 'rom_firmware_version', 'server_model'}
+ inspected_macs = ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb']
+ report = 'fake_report'
+ get_irmc_report_mock.return_value = report
+ scci_mock.get_essential_properties.return_value = inspected_props
+ _get_cap_prop_without_ipmi_mock.return_value = {
+ 'capabilities': formatted_caps,
+ **inspected_props}
+ _get_mac_addresses_mock.return_value = inspected_macs
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ result = irmc_inspect._inspect_hardware(task.node,
+ existing_traits,
+ **kwargs)
+ get_irmc_report_mock.assert_called_once_with(task.node)
+ scci_mock.get_essential_properties.assert_called_once_with(
+ report, irmc_inspect.IRMCInspect.ESSENTIAL_PROPERTIES)
+ _get_cap_prop_without_ipmi_mock.assert_called_once_with(
+ parsed_info, passed_cap_prop, '', inspected_props)
+
+ expected_props = dict(inspected_props)
+ inspected_capabilities = utils.get_updated_capabilities(
+ '', inspected_capabilities)
+ expected_props['capabilities'] = inspected_capabilities
+ self.assertEqual((expected_props, inspected_macs, existing_traits),
+ result)
+
@mock.patch.object(irmc_inspect, '_get_mac_addresses', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_inspect, 'scci',
+ @mock.patch.object(irmc_inspect.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
@@ -130,8 +192,8 @@ class IRMCInspectInternalMethodsTestCase(test_common.BaseIRMCTest):
get_irmc_report_mock.return_value = report
side_effect = exception.SNMPFailure("fake exception")
scci_mock.get_essential_properties.side_effect = side_effect
- irmc_inspect.scci.SCCIInvalidInputError = Exception
- irmc_inspect.scci.SCCIClientError = Exception
+ irmc_inspect.irmc.scci.SCCIInvalidInputError = Exception
+ irmc_inspect.irmc.scci.SCCIClientError = Exception
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
@@ -192,6 +254,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
autospec=True)
def test_inspect_hardware(self, power_state_mock, _inspect_hardware_mock,
port_mock, info_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
inspected_props = {
'memory_mb': '1024',
'local_gb': 10,
@@ -204,8 +269,8 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
_inspect_hardware_mock.return_value = (inspected_props,
inspected_macs,
new_traits)
- new_port_mock1 = mock.MagicMock(spec=objects.Port)
- new_port_mock2 = mock.MagicMock(spec=objects.Port)
+ new_port_mock1 = objects.Port
+ new_port_mock2 = objects.Port
port_mock.side_effect = [new_port_mock1, new_port_mock2]
@@ -220,11 +285,11 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
port_mock.assert_has_calls([
mock.call(task.context, address=inspected_macs[0],
node_id=node_id),
+ mock.call.create(),
mock.call(task.context, address=inspected_macs[1],
- node_id=node_id)
- ])
- new_port_mock1.create.assert_called_once_with()
- new_port_mock2.create.assert_called_once_with()
+ node_id=node_id),
+ mock.call.create()
+ ], any_order=False)
self.assertTrue(info_mock.called)
task.node.refresh()
@@ -247,6 +312,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
port_mock, info_mock,
set_boot_device_mock,
power_action_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
inspected_props = {
'memory_mb': '1024',
'local_gb': 10,
@@ -259,8 +327,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
_inspect_hardware_mock.return_value = (inspected_props,
inspected_macs,
new_traits)
- new_port_mock1 = mock.MagicMock(spec=objects.Port)
- new_port_mock2 = mock.MagicMock(spec=objects.Port)
+
+ new_port_mock1 = objects.Port
+ new_port_mock2 = objects.Port
port_mock.side_effect = [new_port_mock1, new_port_mock2]
@@ -276,11 +345,11 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
port_mock.assert_has_calls([
mock.call(task.context, address=inspected_macs[0],
node_id=node_id),
+ mock.call.create(),
mock.call(task.context, address=inspected_macs[1],
- node_id=node_id)
- ])
- new_port_mock1.create.assert_called_once_with()
- new_port_mock2.create.assert_called_once_with()
+ node_id=node_id),
+ mock.call.create()
+ ], any_order=False)
self.assertTrue(info_mock.called)
task.node.refresh()
@@ -296,6 +365,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
autospec=True)
def test_inspect_hardware_inspect_exception(
self, power_state_mock, _inspect_hardware_mock, port_mock):
+ self.node.set_driver_internal_info('irmc_fw_version', 'iRMC S4/7.82F')
+ self.node.save()
+
side_effect = exception.HardwareInspectionFailure("fake exception")
_inspect_hardware_mock.side_effect = side_effect
power_state_mock.return_value = states.POWER_ON
@@ -320,6 +392,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
def test_inspect_hardware_mac_already_exist(
self, power_state_mock, _inspect_hardware_mock,
port_mock, warn_mock, trait_mock):
+ self.node.set_driver_internal_info('irmc_fw_version', 'iRMC S4/7.82F')
+ self.node.save()
+
inspected_props = {
'memory_mb': '1024',
'local_gb': 10,
@@ -352,7 +427,7 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
spec_set=True, autospec=True)
@mock.patch.object(irmc_inspect, '_get_mac_addresses', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_inspect, 'scci',
+ @mock.patch.object(irmc_inspect.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
@@ -420,6 +495,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
self.assertEqual(expected_traits, result[2])
def test_inspect_hardware_existing_cap_in_props(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
@@ -454,6 +532,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
expected_traits)
def test_inspect_hardware_props_empty_gpu_ids_fpga_ids(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = []
cpu_fpgas = []
@@ -478,6 +559,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
expected_traits)
def test_inspect_hardware_props_pci_gpu_devices_return_zero(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
@@ -507,6 +591,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
def test_inspect_hardware_props_empty_gpu_ids_fpga_id_sand_existing_cap(
self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = []
cpu_fpgas = []
@@ -537,6 +624,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
def test_inspect_hardware_props_gpu_cpu_fpgas_zero_and_existing_cap(
self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
@@ -568,6 +658,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
expected_traits)
def test_inspect_hardware_props_trusted_boot_removed(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
@@ -598,6 +691,9 @@ class IRMCInspectTestCase(test_common.BaseIRMCTest):
def test_inspect_hardware_props_gpu_and_cpu_fpgas_results_are_different(
self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
# Set config flags
gpu_ids = ['0x1000/0x0079', '0x2100/0x0080']
cpu_fpgas = ['0x1000/0x0179', '0x2100/0x0180']
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_management.py b/ironic/tests/unit/drivers/modules/irmc/test_management.py
index c4b152ae9..9e70e04bf 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_management.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_management.py
@@ -30,6 +30,8 @@ from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import management as irmc_management
from ironic.drivers.modules.irmc import power as irmc_power
+from ironic.drivers.modules.redfish import management as redfish_management
+from ironic.drivers.modules.redfish import utils as redfish_util
from ironic.drivers import utils as driver_utils
from ironic.tests.unit.drivers.modules.irmc import test_common
from ironic.tests.unit.drivers import third_party_driver_mock_specs \
@@ -155,26 +157,66 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
task.driver.deploy = fake.FakeDeploy()
self.assertEqual(expected, task.driver.get_properties())
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
- def test_validate(self, mock_drvinfo):
+ def test_validate_ipmi_success(self, mock_drvinfo, redfish_parsedr_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
+ redfish_parsedr_mock.assert_not_called()
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
- def test_validate_fail(self, mock_drvinfo):
+ def test_validate_ipmi_fail(self, mock_drvinfo, redfish_parsedr_mock):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
+ mock_drvinfo.assert_called_once_with(task.node)
+ redfish_parsedr_mock.assert_not_called()
+
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
+ @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
+ autospec=True)
+ def test_validate_redfish_success(
+ self, mock_drvinfo, redfish_parsedr_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.management.validate(task)
+ redfish_parsedr_mock.assert_called_once_with(task.node)
+ mock_drvinfo.assert_called_once_with(task.node)
+
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
+ @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
+ autospec=True)
+ def test_validate_redfish_fail(self, mock_drvinfo, redfish_parsedr_mock):
+ side_effect = exception.InvalidParameterValue("Invalid Input")
+ redfish_parsedr_mock.side_effect = side_effect
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ mock_drvinfo.assert_not_called()
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.management.validate,
+ task)
+ redfish_parsedr_mock.assert_called_once_with(task.node)
- def test_management_interface_get_supported_boot_devices(self):
+ def test_management_interface_get_supported_boot_devices_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM, boot_devices.BIOS,
@@ -182,10 +224,20 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self.assertEqual(sorted(expected), sorted(task.driver.management.
get_supported_boot_devices(task)))
+ def test_management_interface_get_supported_boot_devices_redfish(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ expected = list(redfish_management.BOOT_DEVICE_MAP_REV)
+ self.assertEqual(sorted(expected), sorted(task.driver.management.
+ get_supported_boot_devices(task)))
+
@mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
autospec=True)
def _test_management_interface_set_boot_device_ok(
self, boot_mode, params, expected_raw_code, send_raw_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
send_raw_mock.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -197,12 +249,15 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
mock.call(task, "0x00 0x08 0x03 0x08"),
mock.call(task, expected_raw_code)])
- def test_management_interface_set_boot_device_ok_pxe(self):
+ def test_management_interface_set_boot_device_ok_pxe_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
params = {'device': boot_devices.PXE, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0x80 0x04 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xa0 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -216,7 +271,7 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0xc0 0x04 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xe0 0x04 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -226,12 +281,15 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
params,
"0x00 0x08 0x05 0xe0 0x04 0x00 0x00 0x00")
- def test_management_interface_set_boot_device_ok_disk(self):
+ def test_management_interface_set_boot_device_ok_disk_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
params = {'device': boot_devices.DISK, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0x80 0x08 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xa0 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -245,7 +303,7 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0xc0 0x08 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xe0 0x08 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -255,12 +313,15 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
params,
"0x00 0x08 0x05 0xe0 0x08 0x00 0x00 0x00")
- def test_management_interface_set_boot_device_ok_cdrom(self):
+ def test_management_interface_set_boot_device_ok_cdrom_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
params = {'device': boot_devices.CDROM, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0x80 0x20 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xa0 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -274,7 +335,7 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0xc0 0x20 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xe0 0x20 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -284,12 +345,15 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
params,
"0x00 0x08 0x05 0xe0 0x20 0x00 0x00 0x00")
- def test_management_interface_set_boot_device_ok_bios(self):
+ def test_management_interface_set_boot_device_ok_bios_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
params = {'device': boot_devices.BIOS, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0x80 0x18 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xa0 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -303,7 +367,7 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0xc0 0x18 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xe0 0x18 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -313,12 +377,15 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
params,
"0x00 0x08 0x05 0xe0 0x18 0x00 0x00 0x00")
- def test_management_interface_set_boot_device_ok_safe(self):
+ def test_management_interface_set_boot_device_ok_safe_ipmi(self):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
params = {'device': boot_devices.SAFE, 'persistent': False}
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0x80 0x0c 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xa0 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -332,7 +399,7 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self._test_management_interface_set_boot_device_ok(
None,
params,
- "0x00 0x08 0x05 0xc0 0x0c 0x00 0x00 0x00")
+ "0x00 0x08 0x05 0xe0 0x0c 0x00 0x00 0x00")
self._test_management_interface_set_boot_device_ok(
'bios',
params,
@@ -344,7 +411,10 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_management.ipmitool, "send_raw", spec_set=True,
autospec=True)
- def test_management_interface_set_boot_device_ng(self, send_raw_mock):
+ def test_management_interface_set_boot_device_ng_ipmi(self, send_raw_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+
"""uefi mode, next boot only, unknown device."""
send_raw_mock.return_value = [None, None]
@@ -355,11 +425,39 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
task,
"unknown")
+ @mock.patch.object(irmc_management.ipmitool, 'send_raw', autospec=True)
+ @mock.patch.object(redfish_management.RedfishManagement, 'set_boot_device',
+ autospec=True)
+ def test_management_interface_set_boot_device_success_redfish(
+ self, redfish_set_boot_dev_mock, ipmi_raw_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ ipmi_raw_mock.side_effect = exception.IPMIFailure
+ management_inst = irmc_management.IRMCManagement()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ params = ['pxe', True]
+ management_inst.set_boot_device(task, *params)
+ redfish_set_boot_dev_mock.assert_called_once_with(
+ management_inst, task, *params)
+
+ @mock.patch.object(redfish_management.RedfishManagement, 'set_boot_device',
+ autospec=True)
+ def test_management_interface_set_boot_device_fail_redfish(
+ self, redfish_set_boot_dev_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ management_inst = irmc_management.IRMCManagement()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ params = [task, 'safe', True]
+ self.assertRaises(exception.InvalidParameterValue,
+ management_inst.set_boot_device, *params)
+ redfish_set_boot_dev_mock.assert_not_called()
+
@mock.patch.object(irmc_management.irmc, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
- def test_management_interface_get_sensors_data_scci_ok(
+ def test_management_interface_get_sensors_data_scci_ok_ipmi(
self, mock_get_irmc_report, mock_scci):
"""'irmc_sensor_method' = 'scci' specified and OK data."""
with open(os.path.join(os.path.dirname(__file__),
@@ -371,6 +469,8 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = irmc_management.IRMCManagement().get_sensors_data(
@@ -408,7 +508,58 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
- def test_management_interface_get_sensors_data_scci_ng(
+ def test_management_interface_get_sensors_data_scci_ok_redfish(
+ self, mock_get_irmc_report, mock_scci):
+ """'irmc_sensor_method' = 'scci' specified and OK data."""
+ with open(os.path.join(os.path.dirname(__file__),
+ 'fake_sensors_data_ok.xml'), "r") as report:
+ fake_txt = report.read()
+ fake_xml = ET.fromstring(fake_txt)
+
+ mock_get_irmc_report.return_value = fake_xml
+ mock_scci.get_sensor_data.return_value = fake_xml.find(
+ "./System/SensorDataRecords")
+
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.driver_info['irmc_sensor_method'] = 'scci'
+ sensor_dict = irmc_management.IRMCManagement().get_sensors_data(
+ task)
+
+ expected = {
+ 'Fan (4)': {
+ 'FAN1 SYS (29)': {
+ 'Units': 'RPM',
+ 'Sensor ID': 'FAN1 SYS (29)',
+ 'Sensor Reading': '600 RPM'
+ },
+ 'FAN2 SYS (29)': {
+ 'Units': 'None',
+ 'Sensor ID': 'FAN2 SYS (29)',
+ 'Sensor Reading': 'None None'
+ }
+ },
+ 'Temperature (1)': {
+ 'Systemboard 1 (7)': {
+ 'Units': 'degree C',
+ 'Sensor ID': 'Systemboard 1 (7)',
+ 'Sensor Reading': '80 degree C'
+ },
+ 'Ambient (55)': {
+ 'Units': 'degree C',
+ 'Sensor ID': 'Ambient (55)',
+ 'Sensor Reading': '42 degree C'
+ }
+ }
+ }
+ self.assertEqual(expected, sensor_dict)
+
+ @mock.patch.object(irmc_management.irmc, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
+ autospec=True)
+ def test_management_interface_get_sensors_data_scci_ng_ipmi(
self, mock_get_irmc_report, mock_scci):
"""'irmc_sensor_method' = 'scci' specified and NG data."""
with open(os.path.join(os.path.dirname(__file__),
@@ -420,6 +571,33 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
mock_scci.get_sensor_data.return_value = fake_xml.find(
"./System/SensorDataRecords")
+ self.node.set_driver_internal_info('irmc_fw_version', 'iRMC S5/2.00S')
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.driver_info['irmc_sensor_method'] = 'scci'
+ sensor_dict = irmc_management.IRMCManagement().get_sensors_data(
+ task)
+
+ self.assertEqual(len(sensor_dict), 0)
+
+ @mock.patch.object(irmc_management.irmc, 'scci',
+ spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
+ @mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
+ autospec=True)
+ def test_management_interface_get_sensors_data_scci_ng_redfish(
+ self, mock_get_irmc_report, mock_scci):
+ """'irmc_sensor_method' = 'scci' specified and NG data."""
+ with open(os.path.join(os.path.dirname(__file__),
+ 'fake_sensors_data_ng.xml'), "r") as report:
+ fake_txt = report.read()
+ fake_xml = ET.fromstring(fake_txt)
+
+ mock_get_irmc_report.return_value = fake_xml
+ mock_scci.get_sensor_data.return_value = fake_xml.find(
+ "./System/SensorDataRecords")
+
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'scci'
sensor_dict = irmc_management.IRMCManagement().get_sensors_data(
@@ -429,16 +607,31 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
@mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
spec_set=True, autospec=True)
- def test_management_interface_get_sensors_data_ipmitool_ok(
+ def test_management_interface_get_sensors_data_ipmitool_ok_ipmi(
self,
get_sensors_data_mock):
"""'irmc_sensor_method' = 'ipmitool' specified."""
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_info['irmc_sensor_method'] = 'ipmitool'
task.driver.management.get_sensors_data(task)
get_sensors_data_mock.assert_called_once_with(
task.driver.management, task)
+ @mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
+ spec_set=True, autospec=True)
+ def test_management_interface_get_sensors_data_ipmitool_ng_redfish(
+ self,
+ get_sensors_data_mock):
+ """'irmc_sensor_method' = 'ipmitool' specified."""
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.driver_info['irmc_sensor_method'] = 'ipmitool'
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.management.get_sensors_data, task)
+
@mock.patch.object(irmc_common, 'get_irmc_report', spec_set=True,
autospec=True)
def test_management_interface_get_sensors_data_exception(
@@ -459,6 +652,36 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
self.assertEqual("Failed to get sensor data for node %s. "
"Error: Fake Error" % self.node.uuid, str(e))
+ @mock.patch.object(redfish_management.RedfishManagement, 'detect_vendor',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ipmitool.IPMIManagement, 'detect_vendor',
+ spec_set=True, autospec=True)
+ def test_management_interface_detect_vendor_ipmi(self,
+ ipmimgmt_detectv_mock,
+ redfishmgmt_detectv_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+ irmc_mgmt_inst = irmc_management.IRMCManagement()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mgmt_inst.detect_vendor(task)
+ ipmimgmt_detectv_mock.assert_called_once_with(irmc_mgmt_inst, task)
+ redfishmgmt_detectv_mock.assert_not_called()
+
+ @mock.patch.object(redfish_management.RedfishManagement, 'detect_vendor',
+ spec_set=True, autospec=True)
+ @mock.patch.object(ipmitool.IPMIManagement, 'detect_vendor',
+ spec_set=True, autospec=True)
+ def test_management_interface_detect_vendor_redfish(
+ self, ipmimgmt_detectv_mock, redfishmgmt_detectv_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ ipmimgmt_detectv_mock.side_effect = exception.IPMIFailure
+ irmc_mgmt_inst = irmc_management.IRMCManagement()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mgmt_inst.detect_vendor(task)
+ redfishmgmt_detectv_mock.assert_called_once_with(
+ irmc_mgmt_inst, task)
+
@mock.patch.object(irmc_management.LOG, 'error', spec_set=True,
autospec=True)
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
@@ -500,3 +723,93 @@ class IRMCManagementTestCase(test_common.BaseIRMCTest):
result = task.driver.management.restore_irmc_bios_config(task)
self.assertIsNone(result)
mock_restore_bios.assert_called_once_with(task)
+
+ @mock.patch.object(irmc_common, 'set_irmc_version', autospec=True)
+ @mock.patch.object(irmc_common, 'check_elcm_license', autospec=True)
+ def test_verify_http_s_connection_and_fw_ver_success(self,
+ check_elcm_mock,
+ set_irmc_ver_mock):
+ check_elcm_mock.return_value = {'active': True,
+ 'status_code': 200}
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mng = irmc_management.IRMCManagement()
+ irmc_mng.verify_http_https_connection_and_fw_version(task)
+ check_elcm_mock.assert_called_with(task.node)
+ set_irmc_ver_mock.assert_called_with(task)
+
+ @mock.patch.object(irmc_common, 'set_irmc_version', autospec=True)
+ @mock.patch.object(irmc_common, 'check_elcm_license', autospec=True)
+ def test_verify_http_s_connection_and_fw_ver_raise_http_success(
+ self, check_elcm_mock, set_irmc_ver_mock):
+ error_msg_http = ('iRMC establishing connection to REST API '
+ 'failed. Reason: '
+ 'Access to REST API returns unexpected '
+ 'status code. Check driver_info parameter '
+ 'or version of iRMC because iRMC does not '
+ 'support HTTP connection to iRMC REST API '
+ 'since iRMC S6 2.00.')
+
+ check_elcm_mock.return_value = {'active': False,
+ 'status_code': 404}
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mng = irmc_management.IRMCManagement()
+
+ task.node.driver_info['irmc_port'] = 80
+ self.assertRaisesRegex(
+ exception.IRMCOperationError,
+ error_msg_http,
+ irmc_mng.verify_http_https_connection_and_fw_version,
+ task)
+ check_elcm_mock.assert_called_with(task.node)
+ set_irmc_ver_mock.assert_not_called()
+
+ @mock.patch.object(irmc_common, 'set_irmc_version', autospec=True)
+ @mock.patch.object(irmc_common, 'check_elcm_license', autospec=True)
+ def test_verify_http_s_connection_and_fw_ver_raise_https_success(
+ self, check_elcm_mock, set_irmc_ver_mock):
+ error_msg_https = ('iRMC establishing connection to REST API '
+ 'failed. Reason: '
+ 'Access to REST API returns unexpected '
+ 'status code. Check driver_info parameter '
+ 'related to iRMC driver')
+
+ check_elcm_mock.return_value = {'active': False,
+ 'status_code': 404}
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mng = irmc_management.IRMCManagement()
+ task.node.driver_info['irmc_port'] = 443
+ self.assertRaisesRegex(
+ exception.IRMCOperationError,
+ error_msg_https,
+ irmc_mng.verify_http_https_connection_and_fw_version,
+ task)
+ check_elcm_mock.assert_called_with(task.node)
+ set_irmc_ver_mock.assert_not_called()
+
+ @mock.patch.object(irmc_common, 'set_irmc_version', autospec=True)
+ @mock.patch.object(irmc_common, 'check_elcm_license', autospec=True)
+ def test_verify_http_s_connection_and_fw_ver_fail_invalid(
+ self, check_elcm_mock, set_irmc_ver_mock):
+ check_elcm_mock.side_effect = exception.InvalidParameterValue
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mng = irmc_management.IRMCManagement()
+ self.assertRaises(
+ exception.IRMCOperationError,
+ irmc_mng.verify_http_https_connection_and_fw_version,
+ task)
+ check_elcm_mock.assert_called_with(task.node)
+
+ @mock.patch.object(irmc_common, 'set_irmc_version', autospec=True)
+ @mock.patch.object(irmc_common, 'check_elcm_license', autospec=True)
+ def test_verify_http_s_connection_and_fw_ver_fail_missing(
+ self, check_elcm_mock, set_irmc_ver_mock):
+ check_elcm_mock.side_effect = exception.MissingParameterValue
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ irmc_mng = irmc_management.IRMCManagement()
+ self.assertRaises(
+ exception.IRMCOperationError,
+ irmc_mng.verify_http_https_connection_and_fw_version,
+ task)
+ check_elcm_mock.assert_called_with(task.node)
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_power.py b/ironic/tests/unit/drivers/modules/irmc/test_power.py
index c4142202c..c2509af79 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_power.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_power.py
@@ -24,6 +24,8 @@ from ironic.conductor import task_manager
from ironic.drivers.modules.irmc import boot as irmc_boot
from ironic.drivers.modules.irmc import common as irmc_common
from ironic.drivers.modules.irmc import power as irmc_power
+from ironic.drivers.modules.redfish import power as redfish_power
+from ironic.drivers.modules.redfish import utils as redfish_util
from ironic.tests.unit.drivers.modules.irmc import test_common
@@ -289,17 +291,32 @@ class IRMCPowerTestCase(test_common.BaseIRMCTest):
for prop in irmc_common.COMMON_PROPERTIES:
self.assertIn(prop, properties)
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
- def test_validate(self, mock_drvinfo):
+ def test_validate_default(self, mock_drvinfo, redfish_parsedr_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
+ redfish_parsedr_mock.assert_not_called()
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
@mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
autospec=True)
- def test_validate_fail(self, mock_drvinfo):
+ def test_validate_ipmi(self, mock_drvinfo, redfish_parsedr_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.power.validate(task)
+ mock_drvinfo.assert_called_once_with(task.node)
+ redfish_parsedr_mock.assert_not_called()
+
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
+ @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
+ autospec=True)
+ def test_validate_fail_ipmi(self, mock_drvinfo, redfish_parsedr_mock):
side_effect = exception.InvalidParameterValue("Invalid Input")
mock_drvinfo.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
@@ -307,10 +324,40 @@ class IRMCPowerTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate,
task)
+ redfish_parsedr_mock.assert_not_called()
+
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
+ @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
+ autospec=True)
+ def test_validate_redfish(self, mock_drvinfo, redfish_parsedr_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.power.validate(task)
+ mock_drvinfo.assert_called_once_with(task.node)
+ redfish_parsedr_mock.assert_called_once_with(task.node)
+ @mock.patch.object(redfish_util, 'parse_driver_info', autospec=True)
+ @mock.patch.object(irmc_common, 'parse_driver_info', spec_set=True,
+ autospec=True)
+ def test_validate_fail_redfish(self, mock_drvinfo, redfish_parsedr_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ side_effect = exception.InvalidParameterValue("Invalid Input")
+ redfish_parsedr_mock.side_effect = side_effect
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.power.validate,
+ task)
+ mock_drvinfo.assert_called_once_with(task.node)
+
+ @mock.patch.object(redfish_power.RedfishPower, 'get_power_state',
+ autospec=True)
@mock.patch('ironic.drivers.modules.irmc.power.ipmitool.IPMIPower',
spec_set=True, autospec=True)
- def test_get_power_state(self, mock_IPMIPower):
+ def test_get_power_state_default(self, mock_IPMIPower, redfish_getpw_mock):
ipmi_power = mock_IPMIPower.return_value
ipmi_power.get_power_state.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
@@ -318,6 +365,41 @@ class IRMCPowerTestCase(test_common.BaseIRMCTest):
self.assertEqual(states.POWER_ON,
task.driver.power.get_power_state(task))
ipmi_power.get_power_state.assert_called_once_with(task)
+ redfish_getpw_mock.assert_not_called()
+
+ @mock.patch.object(redfish_power.RedfishPower, 'get_power_state',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.irmc.power.ipmitool.IPMIPower',
+ spec_set=True, autospec=True)
+ def test_get_power_state_ipmi(self, mock_IPMIPower, redfish_getpw_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', True)
+ self.node.save()
+ ipmi_power = mock_IPMIPower.return_value
+ ipmi_power.get_power_state.return_value = states.POWER_ON
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertEqual(states.POWER_ON,
+ task.driver.power.get_power_state(task))
+ ipmi_power.get_power_state.assert_called_once_with(task)
+ redfish_getpw_mock.assert_not_called()
+
+ @mock.patch.object(redfish_power.RedfishPower, 'get_power_state',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.irmc.power.ipmitool.IPMIPower',
+ spec_set=True, autospec=True)
+ def test_get_power_state_redfish(self, mock_IPMIPower, redfish_getpw_mock):
+ self.node.set_driver_internal_info('irmc_ipmi_succeed', False)
+ self.node.save()
+ ipmipw_instance = mock_IPMIPower()
+ ipmipw_instance.get_power_state.side_effect = exception.IPMIFailure
+ redfish_getpw_mock.return_value = states.POWER_ON
+ irmc_power_inst = irmc_power.IRMCPower()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertEqual(states.POWER_ON,
+ irmc_power_inst.get_power_state(task))
+ ipmipw_instance.get_power_state.assert_called()
+ redfish_getpw_mock.assert_called_once_with(irmc_power_inst, task)
@mock.patch.object(irmc_power, '_set_power_state', spec_set=True,
autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/network/test_common.py b/ironic/tests/unit/drivers/modules/network/test_common.py
index 555024216..7b907ad22 100644
--- a/ironic/tests/unit/drivers/modules/network/test_common.py
+++ b/ironic/tests/unit/drivers/modules/network/test_common.py
@@ -1086,7 +1086,9 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
autospec=True)
def test_port_changed_client_id_fail(self, dhcp_update_mock):
self.port.internal_info = {'tenant_vif_port_id': 'fake-id'}
- self.port.extra = {'client-id': 'fake2'}
+ self.port.extra = {'client-id': 'fake3'}
+ # NOTE(TheJulia): Does not save, because it attempts to figure
+ # out what has changed as part of the test.
dhcp_update_mock.side_effect = (
exception.FailedToUpdateDHCPOptOnPort(port_id=self.port.uuid))
with task_manager.acquire(self.context, self.node.id) as task:
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_raid.py b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
index dfb3c1473..843be735c 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_raid.py
@@ -336,6 +336,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_configs'))
self.assertEqual(
[{'controller': 'RAID controller 1',
'id': '1',
@@ -1066,6 +1068,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
+ self.assertIsNone(
+ task.node.driver_internal_info.get('raid_configs'))
self.assertEqual([], task.node.raid_config['logical_disks'])
self.assertNotEqual(
last_updated, task.node.raid_config['last_updated'])
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
index ca8aba9da..01b7089c7 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
@@ -252,6 +252,7 @@ class RedfishUtilsAuthTestCase(db_base.DbTestCase):
redfish_utils.get_system(self.node)
redfish_utils.get_system(self.node)
self.assertEqual(1, mock_sushy.call_count)
+ self.assertEqual(len(redfish_utils.SessionCache._sessions), 1)
@mock.patch.object(sushy, 'Sushy', autospec=True)
def test_ensure_new_session_address(self, mock_sushy):
@@ -270,6 +271,21 @@ class RedfishUtilsAuthTestCase(db_base.DbTestCase):
self.assertEqual(2, mock_sushy.call_count)
@mock.patch.object(sushy, 'Sushy', autospec=True)
+ def test_ensure_new_session_password(self, mock_sushy):
+ d_info = self.node.driver_info
+ d_info['redfish_username'] = 'foo'
+ d_info['redfish_password'] = 'bar'
+ self.node.driver_info = d_info
+ self.node.save()
+ redfish_utils.get_system(self.node)
+ d_info['redfish_password'] = 'foo'
+ self.node.driver_info = d_info
+ self.node.save()
+        # The changed password alone must force a new session (no cache reset)
+ redfish_utils.get_system(self.node)
+ self.assertEqual(2, mock_sushy.call_count)
+
+ @mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.'
'SessionCache.AUTH_CLASSES', autospec=True)
@mock.patch('ironic.drivers.modules.redfish.utils.SessionCache._sessions',
diff --git a/ironic/tests/unit/drivers/modules/test_image_utils.py b/ironic/tests/unit/drivers/modules/test_image_utils.py
index 753452f5d..b6c572125 100644
--- a/ironic/tests/unit/drivers/modules/test_image_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_image_utils.py
@@ -105,73 +105,96 @@ class RedfishImageHandlerTestCase(db_base.DbTestCase):
mock_swift_api.delete_object.assert_called_once_with(
'ironic_redfish_container', object_name)
+ @mock.patch.object(utils, 'execute', autospec=True)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_local_link(
- self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod,
+ mock_execute):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost', group='deploy')
img_handler_obj = image_utils.ImageHandler(self.node.driver)
-
url = img_handler_obj.publish_image('file.iso', 'boot.iso')
-
self.assertEqual(
'http://localhost/redfish/boot.iso', url)
+ mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
+ mock_link.assert_called_once_with(
+ 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_chmod.assert_called_once_with('file.iso', 0o644)
+ mock_execute.assert_called_once_with(
+ '/usr/sbin/restorecon', '-i', '-R', 'v', '/httpboot/redfish')
+ @mock.patch.object(utils, 'execute', autospec=True)
+ @mock.patch.object(os, 'chmod', autospec=True)
+ @mock.patch.object(image_utils, 'shutil', autospec=True)
+ @mock.patch.object(os, 'link', autospec=True)
+ @mock.patch.object(os, 'mkdir', autospec=True)
+ def test_publish_image_local_link_no_restorecon(
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod,
+ mock_execute):
+ self.config(use_swift=False, group='redfish')
+ self.config(http_url='http://localhost', group='deploy')
+        img_handler_obj = image_utils.ImageHandler(self.node.driver)
+        mock_execute.side_effect = FileNotFoundError
+        url = img_handler_obj.publish_image('file.iso', 'boot.iso')
+        self.assertEqual(
+            'http://localhost/redfish/boot.iso', url)
+        mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
+        mock_link.assert_called_once_with(
+            'file.iso', '/httpboot/redfish/boot.iso')
+        mock_chmod.assert_called_once_with('file.iso', 0o644)
+        mock_shutil.assert_not_called()
+ @mock.patch.object(utils, 'execute', autospec=True)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_external_ip(
- self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod,
+ mock_execute):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost',
external_http_url='http://non-local.host',
group='deploy')
img_handler_obj = image_utils.ImageHandler(self.node.driver)
-
url = img_handler_obj.publish_image('file.iso', 'boot.iso')
-
self.assertEqual(
'http://non-local.host/redfish/boot.iso', url)
-
mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
mock_link.assert_called_once_with(
'file.iso', '/httpboot/redfish/boot.iso')
mock_chmod.assert_called_once_with('file.iso', 0o644)
+ mock_execute.assert_called_once_with(
+ '/usr/sbin/restorecon', '-i', '-R', 'v', '/httpboot/redfish')
+ @mock.patch.object(utils, 'execute', autospec=True)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test_publish_image_external_ip_node_override(
- self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod,
+ mock_execute):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost',
external_http_url='http://non-local.host',
group='deploy')
img_handler_obj = image_utils.ImageHandler(self.node.driver)
self.node.driver_info["external_http_url"] = "http://node.override.url"
-
override_url = self.node.driver_info.get("external_http_url")
-
url = img_handler_obj.publish_image('file.iso', 'boot.iso',
override_url)
-
self.assertEqual(
'http://node.override.url/redfish/boot.iso', url)
-
mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
mock_link.assert_called_once_with(
'file.iso', '/httpboot/redfish/boot.iso')
mock_chmod.assert_called_once_with('file.iso', 0o644)
+ mock_execute.assert_called_once_with(
+ '/usr/sbin/restorecon', '-i', '-R', 'v', '/httpboot/redfish')
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(image_utils, 'shutil', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index 3c636a1b1..7cb451473 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -17,15 +17,20 @@
from unittest import mock
from oslo_utils import importutils
+import swiftclient.exceptions
+from ironic.common import context as ironic_context
from ironic.common import exception
+from ironic.common import swift
from ironic.conductor import task_manager
from ironic.drivers.modules import inspect_utils as utils
+from ironic.drivers.modules import inspector
from ironic import objects
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
sushy = importutils.try_import('sushy')
+CONF = inspector.CONF
@mock.patch('time.sleep', lambda sec: None)
@@ -88,3 +93,227 @@ class InspectFunctionTestCase(db_base.DbTestCase):
mock.call(task.context, **port_dict2)]
port_mock.assert_has_calls(expected_calls, any_order=True)
self.assertEqual(2, port_mock.return_value.create.call_count)
+
+
+class SwiftCleanUp(db_base.DbTestCase):
+
+ def setUp(self):
+ super(SwiftCleanUp, self).setUp()
+ self.node = obj_utils.create_test_node(self.context)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_clean_up_swift_entries(self, swift_api_mock):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ utils.clean_up_swift_entries(task)
+ object_name = 'inspector_data-' + str(self.node.uuid)
+ swift_obj_mock.delete_object.assert_has_calls([
+ mock.call(object_name + '-inventory', container),
+ mock.call(object_name + '-plugin', container)])
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_clean_up_swift_entries_with_404_exception(self, swift_api_mock):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ swift_obj_mock.delete_object.side_effect = [
+ swiftclient.exceptions.ClientException("not found",
+ http_status=404),
+ swiftclient.exceptions.ClientException("not found",
+ http_status=404)]
+ utils.clean_up_swift_entries(task)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_clean_up_swift_entries_with_fail_exception(self, swift_api_mock):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ swift_obj_mock.delete_object.side_effect = [
+ swiftclient.exceptions.ClientException("failed",
+ http_status=417),
+ swiftclient.exceptions.ClientException("not found",
+ http_status=404)]
+ self.assertRaises(exception.SwiftObjectStillExists,
+ utils.clean_up_swift_entries, task)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test_clean_up_swift_entries_with_fail_exceptions(self, swift_api_mock):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ swift_obj_mock.delete_object.side_effect = [
+ swiftclient.exceptions.ClientException("failed",
+ http_status=417),
+ swiftclient.exceptions.ClientException("failed",
+ http_status=417)]
+            self.assertRaises(
+                exception.SwiftObjectStillExists,
+                utils.clean_up_swift_entries, task)
+
+
+class IntrospectionDataStorageFunctionsTestCase(db_base.DbTestCase):
+ fake_inventory_data = {"cpu": "amd"}
+ fake_plugin_data = {"disks": [{"name": "/dev/vda"}]}
+
+ def setUp(self):
+ super(IntrospectionDataStorageFunctionsTestCase, self).setUp()
+ self.node = obj_utils.create_test_node(self.context)
+
+ def test_store_introspection_data_db(self):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ fake_introspection_data = {'inventory': self.fake_inventory_data,
+ **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ utils.store_introspection_data(self.node, fake_introspection_data,
+ fake_context)
+ stored = objects.NodeInventory.get_by_node_id(self.context,
+ self.node.id)
+ self.assertEqual(self.fake_inventory_data, stored["inventory_data"])
+ self.assertEqual(self.fake_plugin_data, stored["plugin_data"])
+
+ @mock.patch.object(utils, '_store_introspection_data_in_swift',
+ autospec=True)
+ def test_store_introspection_data_swift(self, mock_store_data):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ CONF.set_override(
+ 'swift_data_container', 'introspection_data',
+ group='inventory')
+ fake_introspection_data = {
+ "inventory": self.fake_inventory_data, **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ utils.store_introspection_data(self.node, fake_introspection_data,
+ fake_context)
+ mock_store_data.assert_called_once_with(
+ self.node.uuid, inventory_data=self.fake_inventory_data,
+ plugin_data=self.fake_plugin_data)
+
+ def test_store_introspection_data_nostore(self):
+ CONF.set_override('data_backend', 'none', group='inventory')
+ fake_introspection_data = {
+ "inventory": self.fake_inventory_data, **self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ ret = utils.store_introspection_data(self.node,
+ fake_introspection_data,
+ fake_context)
+ self.assertIsNone(ret)
+
+ def test__node_inventory_convert(self):
+ required_output = {"inventory": self.fake_inventory_data,
+ "plugin_data": self.fake_plugin_data}
+ input_given = {}
+ input_given["inventory_data"] = self.fake_inventory_data
+ input_given["plugin_data"] = self.fake_plugin_data
+ input_given["booom"] = "boom"
+ ret = utils._node_inventory_convert(input_given)
+ self.assertEqual(required_output, ret)
+
+ @mock.patch.object(utils, '_node_inventory_convert', autospec=True)
+ @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
+ def test_get_introspection_data_db(self, mock_inventory, mock_convert):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ fake_introspection_data = {'inventory': self.fake_inventory_data,
+ 'plugin_data': self.fake_plugin_data}
+ fake_context = ironic_context.RequestContext()
+ mock_inventory.get_by_node_id.return_value = fake_introspection_data
+ utils.get_introspection_data(self.node, fake_context)
+ mock_convert.assert_called_once_with(fake_introspection_data)
+
+ @mock.patch.object(objects, 'NodeInventory', spec_set=True, autospec=True)
+ def test_get_introspection_data_db_exception(self, mock_inventory):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ fake_context = ironic_context.RequestContext()
+ mock_inventory.get_by_node_id.side_effect = [
+ exception.NodeInventoryNotFound(self.node.uuid)]
+ self.assertRaises(
+ exception.NodeInventoryNotFound, utils.get_introspection_data,
+ self.node, fake_context)
+
+ @mock.patch.object(utils, '_get_introspection_data_from_swift',
+ autospec=True)
+ def test_get_introspection_data_swift(self, mock_get_data):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ CONF.set_override(
+ 'swift_data_container', 'introspection_data',
+ group='inventory')
+ fake_context = ironic_context.RequestContext()
+ utils.get_introspection_data(self.node, fake_context)
+ mock_get_data.assert_called_once_with(
+ self.node.uuid)
+
+ @mock.patch.object(utils, '_get_introspection_data_from_swift',
+ autospec=True)
+ def test_get_introspection_data_swift_exception(self, mock_get_data):
+ CONF.set_override('data_backend', 'swift', group='inventory')
+ CONF.set_override(
+ 'swift_data_container', 'introspection_data',
+ group='inventory')
+ fake_context = ironic_context.RequestContext()
+ mock_get_data.side_effect = exception.SwiftObjectNotFoundError()
+ self.assertRaises(
+ exception.NodeInventoryNotFound, utils.get_introspection_data,
+ self.node, fake_context)
+
+ def test_get_introspection_data_nostore(self):
+ CONF.set_override('data_backend', 'none', group='inventory')
+ fake_context = ironic_context.RequestContext()
+ self.assertRaises(
+ exception.NodeInventoryNotFound, utils.get_introspection_data,
+ self.node, fake_context)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test__store_introspection_data_in_swift(self, swift_api_mock):
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ utils._store_introspection_data_in_swift(
+ self.node.uuid, self.fake_inventory_data, self.fake_plugin_data)
+ swift_obj_mock = swift_api_mock.return_value
+ object_name = 'inspector_data-' + str(self.node.uuid)
+ swift_obj_mock.create_object_from_data.assert_has_calls([
+ mock.call(object_name + '-inventory', self.fake_inventory_data,
+ container),
+ mock.call(object_name + '-plugin', self.fake_plugin_data,
+ container)])
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test__get_introspection_data_from_swift(self, swift_api_mock):
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ swift_obj_mock.get_object.side_effect = [
+ self.fake_inventory_data,
+ self.fake_plugin_data
+ ]
+ ret = utils._get_introspection_data_from_swift(self.node.uuid)
+ req_ret = {"inventory": self.fake_inventory_data,
+ "plugin_data": self.fake_plugin_data}
+ self.assertEqual(req_ret, ret)
+
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ def test__get_introspection_data_from_swift_exception(self,
+ swift_api_mock):
+ container = 'introspection_data'
+ CONF.set_override('swift_data_container', container, group='inventory')
+ swift_obj_mock = swift_api_mock.return_value
+ swift_obj_mock.get_object.side_effect = [
+ exception.SwiftOperationError,
+ self.fake_plugin_data
+ ]
+ self.assertRaises(exception.SwiftObjectNotFoundError,
+ utils._get_introspection_data_from_swift,
+ self.node.uuid)
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py
index 09d70eba9..75ccc3ebf 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/test_inspector.py
@@ -27,7 +27,6 @@ from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
-
CONF = inspector.CONF
@@ -552,6 +551,47 @@ class CheckStatusTestCase(BaseTestCase):
self.task)
self.driver.boot.clean_up_ramdisk.assert_called_once_with(self.task)
+ @mock.patch.object(inspect_utils, 'store_introspection_data',
+ autospec=True)
+ def test_status_ok_store_inventory(self, mock_store_data, mock_client):
+ mock_get = mock_client.return_value.get_introspection
+ mock_get.return_value = mock.Mock(is_finished=True,
+ error=None,
+ spec=['is_finished', 'error'])
+ fake_introspection_data = {
+ "inventory": {"cpu": "amd"}, "disks": [{"name": "/dev/vda"}]}
+ mock_get_data = mock_client.return_value.get_introspection_data
+ mock_get_data.return_value = fake_introspection_data
+ inspector._check_status(self.task)
+ mock_get.assert_called_once_with(self.node.uuid)
+ mock_get_data.assert_called_once_with(self.node.uuid, processed=True)
+ mock_store_data.assert_called_once_with(self.node,
+ fake_introspection_data,
+ self.task.context)
+
+ def test_status_ok_store_inventory_nostore(self, mock_client):
+ CONF.set_override('data_backend', 'none', group='inventory')
+ mock_get = mock_client.return_value.get_introspection
+ mock_get.return_value = mock.Mock(is_finished=True,
+ error=None,
+ spec=['is_finished', 'error'])
+ mock_get_data = mock_client.return_value.get_introspection_data
+ inspector._check_status(self.task)
+ mock_get.assert_called_once_with(self.node.uuid)
+ mock_get_data.assert_not_called()
+
+ def test_status_error_dont_store_inventory(self, mock_client):
+ CONF.set_override('data_backend', 'database',
+ group='inventory')
+ mock_get = mock_client.return_value.get_introspection
+ mock_get.return_value = mock.Mock(is_finished=True,
+ error='boom',
+ spec=['is_finished', 'error'])
+ mock_get_data = mock_client.return_value.get_introspection_data
+ inspector._check_status(self.task)
+ mock_get.assert_called_once_with(self.node.uuid)
+ mock_get_data.assert_not_called()
+
@mock.patch('ironic.drivers.modules.inspector._get_client', autospec=True)
class InspectHardwareAbortTestCase(BaseTestCase):
@@ -564,4 +604,5 @@ class InspectHardwareAbortTestCase(BaseTestCase):
mock_abort = mock_client.return_value.abort_introspection
mock_abort.side_effect = RuntimeError('boom')
self.assertRaises(RuntimeError, self.iface.abort, self.task)
+
mock_abort.assert_called_once_with(self.node.uuid)
diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py
index b982e0cb2..016b9d6ed 100644
--- a/ironic/tests/unit/drivers/modules/test_ipmitool.py
+++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py
@@ -3255,6 +3255,11 @@ class IPMIToolShellinaboxTestCase(db_base.DbTestCase):
mock_start.return_value = None
mock_info.return_value = {'port': None}
mock_alloc.return_value = 1234
+ # Ensure allocated port is not re-used
+ dii = self.node.driver_internal_info
+ dii['allocated_ipmi_terminal_port'] = 4321
+ self.node.driver_internal_info = dii
+ self.node.save()
with task_manager.acquire(self.context,
self.node.uuid) as task:
@@ -3468,6 +3473,11 @@ class IPMIToolSocatDriverTestCase(IPMIToolShellinaboxTestCase):
mock_start.return_value = None
mock_info.return_value = {'port': None}
mock_alloc.return_value = 1234
+ # Ensure allocated port is not re-used
+ dii = self.node.driver_internal_info
+ dii['allocated_ipmi_terminal_port'] = 4321
+ self.node.driver_internal_info = dii
+ self.node.save()
with task_manager.acquire(self.context,
self.node.uuid) as task:
diff --git a/ironic/tests/unit/drivers/modules/test_snmp.py b/ironic/tests/unit/drivers/modules/test_snmp.py
index 6bdd2da5a..5391d7ac5 100644
--- a/ironic/tests/unit/drivers/modules/test_snmp.py
+++ b/ironic/tests/unit/drivers/modules/test_snmp.py
@@ -327,6 +327,34 @@ class SNMPValidateParametersTestCase(db_base.DbTestCase):
info = snmp._parse_driver_info(node)
self.assertEqual('teltronix', info['driver'])
+ def test__parse_driver_info_servertech_sentry3(self):
+ # Make sure the servertech_sentry3 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='servertech_sentry3')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('servertech_sentry3', info['driver'])
+
+ def test__parse_driver_info_servertech_sentry4(self):
+ # Make sure the servertech_sentry4 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='servertech_sentry4')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('servertech_sentry4', info['driver'])
+
+ def test__parse_driver_info_raritan_pdu2(self):
+ # Make sure the raritan_pdu2 driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='raritan_pdu2')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('raritan_pdu2', info['driver'])
+
+ def test__parse_driver_info_vertivgeist_pdu(self):
+ # Make sure the vertivgeist_pdu driver type is parsed.
+ info = db_utils.get_test_snmp_info(snmp_driver='vertivgeist_pdu')
+ node = self._get_test_node(info)
+ info = snmp._parse_driver_info(node)
+ self.assertEqual('vertivgeist_pdu', info['driver'])
+
def test__parse_driver_info_snmp_v1(self):
# Make sure SNMPv1 is parsed with a community string.
info = db_utils.get_test_snmp_info(snmp_version='1',
@@ -733,7 +761,7 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
driver = snmp._get_driver(self.node)
mock_client.get.return_value = driver.value_power_on
pstate = driver.power_on()
- mock_sleep.assert_called_once_with(1)
+ self.assertTrue(mock_sleep.called)
mock_client.set.assert_called_once_with(driver._snmp_oid(),
driver.value_power_on)
mock_client.get.assert_called_once_with(driver._snmp_oid())
@@ -747,7 +775,7 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
driver = snmp._get_driver(self.node)
mock_client.get.return_value = driver.value_power_off
pstate = driver.power_off()
- mock_sleep.assert_called_once_with(1)
+ self.assertTrue(mock_sleep.called)
mock_client.set.assert_called_once_with(driver._snmp_oid(),
driver.value_power_off)
mock_client.get.assert_called_once_with(driver._snmp_oid())
@@ -1260,6 +1288,58 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
def test_apc_rackpdu_power_reset(self, mock_get_client):
self._test_simple_device_power_reset('apc_rackpdu', mock_get_client)
+ def test_raritan_pdu2_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # Raritan PDU2 driver
+ self._update_driver_info(snmp_driver="raritan_pdu2",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 13742, 6, 4, 1, 2, 1, 2, 1, 6)
+ action = (2,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(0, driver.value_power_off)
+
+ def test_servertech_sentry3_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # ServerTech Sentry3 driver
+ self._update_driver_info(snmp_driver="servertech_sentry3",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 1718, 3, 2, 3, 1, 5, 1, 1, 6)
+ action = (5,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(2, driver.value_power_off)
+
+ def test_servertech_sentry4_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # ServerTech Sentry4 driver
+ self._update_driver_info(snmp_driver="servertech_sentry4",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 1718, 4, 1, 8, 5, 1, 2, 1, 1, 6)
+ action = (2,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(1, driver.value_power_on)
+ self.assertEqual(2, driver.value_power_off)
+
+ def test_vertivgeist_pdu_snmp_objects(self, mock_get_client):
+ # Ensure the correct SNMP object OIDs and values are used by the
+ # Vertiv Geist PDU driver
+ self._update_driver_info(snmp_driver="vertivgeist_pdu",
+ snmp_outlet="6")
+ driver = snmp._get_driver(self.node)
+ oid = (1, 3, 6, 1, 4, 1, 21239, 5, 2, 3, 5, 1, 4, 6)
+ action = (4,)
+
+ self.assertEqual(oid, driver._snmp_oid(action))
+ self.assertEqual(2, driver.value_power_on)
+ self.assertEqual(4, driver.value_power_off)
+
def test_aten_snmp_objects(self, mock_get_client):
# Ensure the correct SNMP object OIDs and values are used by the
# Aten driver
diff --git a/ironic/tests/unit/drivers/pxe_grub_config.template b/ironic/tests/unit/drivers/pxe_grub_config.template
index c4410b489..95716cb16 100644
--- a/ironic/tests/unit/drivers/pxe_grub_config.template
+++ b/ironic/tests/unit/drivers/pxe_grub_config.template
@@ -16,3 +16,7 @@ menuentry "boot_whole_disk" {
linuxefi chain.c32 mbr:(( DISK_IDENTIFIER ))
}
+menuentry "boot_anaconda" {
+ linuxefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/kernel text test_param inst.ks=http://fake/ks.cfg inst.stage2=http://fake/stage2
+ initrdefi /tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/ramdisk
+}
diff --git a/ironic/tests/unit/drivers/test_fake_hardware.py b/ironic/tests/unit/drivers/test_fake_hardware.py
index 70460a6a4..637f52bf9 100644
--- a/ironic/tests/unit/drivers/test_fake_hardware.py
+++ b/ironic/tests/unit/drivers/test_fake_hardware.py
@@ -17,6 +17,8 @@
"""Test class for Fake driver."""
+import time
+from unittest import mock
from ironic.common import boot_devices
from ironic.common import boot_modes
@@ -26,6 +28,7 @@ from ironic.common import indicator_states
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base as driver_base
+from ironic.drivers.modules import fake
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
@@ -164,3 +167,29 @@ class FakeHardwareTestCase(db_base.DbTestCase):
self.assertEqual({}, self.driver.inspect.get_properties())
self.driver.inspect.validate(self.task)
self.driver.inspect.inspect_hardware(self.task)
+
+ def test_parse_sleep_range(self):
+ self.assertEqual((0, 0), fake.parse_sleep_range('0'))
+ self.assertEqual((0, 0), fake.parse_sleep_range(''))
+ self.assertEqual((1, 1), fake.parse_sleep_range('1'))
+ self.assertEqual((1, 10), fake.parse_sleep_range('1,10'))
+ self.assertEqual((10, 20), fake.parse_sleep_range('10, 20'))
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_zero(self, mock_sleep):
+ fake.sleep("0")
+ mock_sleep.assert_not_called()
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_one(self, mock_sleep):
+ fake.sleep("1")
+ mock_sleep.assert_called_once_with(1)
+
+ @mock.patch.object(time, 'sleep', autospec=True)
+ def test_sleep_range(self, mock_sleep):
+ for i in range(100):
+ fake.sleep("1,10")
+ for call in mock_sleep.call_args_list:
+ v = call[0][0]
+ self.assertGreaterEqual(v, 1)
+ self.assertLessEqual(v, 10)
diff --git a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
index b58504fbe..78939c91a 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py
@@ -95,9 +95,11 @@ SCCICLIENT_IRMC_SCCI_SPEC = (
'get_virtual_fd_set_params_cmd',
'get_essential_properties',
'get_capabilities_properties',
+ 'get_irmc_version_str',
)
SCCICLIENT_IRMC_ELCM_SPEC = (
'backup_bios_config',
+ 'elcm_request',
'restore_bios_config',
'set_secure_boot_mode',
)
diff --git a/ironic/tests/unit/objects/test_node_inventory.py b/ironic/tests/unit/objects/test_node_inventory.py
new file mode 100644
index 000000000..9eee77a16
--- /dev/null
+++ b/ironic/tests/unit/objects/test_node_inventory.py
@@ -0,0 +1,49 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from ironic import objects
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.db import utils as db_utils
+from ironic.tests.unit.objects import utils as obj_utils
+
+
+class TestNodeInventoryObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
+
+ def setUp(self):
+ super(TestNodeInventoryObject, self).setUp()
+ self.fake_inventory = db_utils.get_test_inventory()
+
+ def test_create(self):
+ with mock.patch.object(self.dbapi, 'create_node_inventory',
+ autospec=True) as mock_db_create:
+ mock_db_create.return_value = self.fake_inventory
+ new_inventory = objects.NodeInventory(
+ self.context, **self.fake_inventory)
+ new_inventory.create()
+
+ mock_db_create.assert_called_once_with(self.fake_inventory)
+
+ def test_destroy(self):
+ node_id = self.fake_inventory['node_id']
+ with mock.patch.object(self.dbapi, 'get_node_inventory_by_node_id',
+ autospec=True) as mock_get:
+ mock_get.return_value = self.fake_inventory
+ with mock.patch.object(self.dbapi,
+ 'destroy_node_inventory_by_node_id',
+ autospec=True) as mock_db_destroy:
+ inventory = objects.NodeInventory.get_by_node_id(self.context,
+ node_id)
+ inventory.destroy()
+
+ mock_db_destroy.assert_called_once_with(node_id)
diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py
index 7c58cf4aa..55b34a172 100644
--- a/ironic/tests/unit/objects/test_objects.py
+++ b/ironic/tests/unit/objects/test_objects.py
@@ -676,11 +676,11 @@ class TestObject(_LocalTest, _TestObject):
# version bump. It is an MD5 hash of the object fields and remotable methods.
# The fingerprint values should only be changed if there is a version bump.
expected_object_fingerprints = {
- 'Node': '1.36-8a080e31ba89ca5f09e859bd259b54dc',
+ 'Node': '1.37-6b38eb91aec57532547ea8607f95675a',
'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6',
'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905',
- 'Port': '1.10-67381b065c597c8d3a13c5dbc6243c33',
- 'Portgroup': '1.4-71923a81a86743b313b190f5c675e258',
+ 'Port': '1.11-97bf15b61224f26c65e90f007d78bfd2',
+ 'Portgroup': '1.5-df4dc15967f67114d51176a98a901a83',
'Conductor': '1.3-d3f53e853b4d58cae5bfbd9a8341af4a',
'EventType': '1.1-aa2ba1afd38553e3880c267404e8d370',
'NotificationPublisher': '1.0-51a09397d6c0687771fb5be9a999605d',
@@ -721,6 +721,7 @@ expected_object_fingerprints = {
'DeployTemplateCRUDPayload': '1.0-200857e7e715f58a5b6d6b700ab73a3b',
'Deployment': '1.0-ff10ae028c5968f1596131d85d7f5f9d',
'NodeHistory': '1.0-9b576c6481071e7f7eac97317fa29418',
+ 'NodeInventory': '1.0-97692fec24e20ab02022b9db54e8f539',
}
diff --git a/ironic/tests/unit/objects/test_port.py b/ironic/tests/unit/objects/test_port.py
index cf7633808..4c7280216 100644
--- a/ironic/tests/unit/objects/test_port.py
+++ b/ironic/tests/unit/objects/test_port.py
@@ -88,12 +88,16 @@ class TestPortObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
port = objects.Port(self.context, **self.fake_port)
with mock.patch.object(self.dbapi, 'create_port',
autospec=True) as mock_create_port:
- mock_create_port.return_value = db_utils.get_test_port()
+ with mock.patch.object(self.dbapi, 'get_port_by_id',
+ autospec=True) as mock_get_port:
+ test_port = db_utils.get_test_port()
+ mock_create_port.return_value = test_port
+ mock_get_port.return_value = test_port
- port.create()
+ port.create()
- args, _kwargs = mock_create_port.call_args
- self.assertEqual(objects.Port.VERSION, args[0]['version'])
+ args, _kwargs = mock_create_port.call_args
+ self.assertEqual(objects.Port.VERSION, args[0]['version'])
def test_save(self):
uuid = self.fake_port['uuid']
diff --git a/ironic/tests/unit/objects/test_portgroup.py b/ironic/tests/unit/objects/test_portgroup.py
index 81b68437b..7e844dac7 100644
--- a/ironic/tests/unit/objects/test_portgroup.py
+++ b/ironic/tests/unit/objects/test_portgroup.py
@@ -80,13 +80,18 @@ class TestPortgroupObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
def test_create(self):
portgroup = objects.Portgroup(self.context, **self.fake_portgroup)
with mock.patch.object(self.dbapi, 'create_portgroup',
- autospec=True) as mock_create_portgroup:
- mock_create_portgroup.return_value = db_utils.get_test_portgroup()
-
- portgroup.create()
-
- args, _kwargs = mock_create_portgroup.call_args
- self.assertEqual(objects.Portgroup.VERSION, args[0]['version'])
+ autospec=True) as mock_create_pg:
+ with mock.patch.object(self.dbapi, 'get_portgroup_by_id',
+ autospec=True) as mock_get_pg:
+ test_pg = db_utils.get_test_portgroup()
+ mock_create_pg.return_value = test_pg
+ mock_get_pg.return_value = test_pg
+ mock_create_pg.return_value = db_utils.get_test_portgroup()
+
+ portgroup.create()
+
+ args, _kwargs = mock_create_pg.call_args
+ self.assertEqual(objects.Portgroup.VERSION, args[0]['version'])
def test_save(self):
uuid = self.fake_portgroup['uuid']
diff --git a/redfish-interop-profiles/OpenStackIronicProfile.v1_0_0.json b/redfish-interop-profiles/OpenStackIronicProfile.v1_0_0.json
new file mode 100644
index 000000000..a9571bb2f
--- /dev/null
+++ b/redfish-interop-profiles/OpenStackIronicProfile.v1_0_0.json
@@ -0,0 +1,221 @@
+{
+ "@Redfish.Copyright": "Copyright (c) 2020-2022 Dell Inc. or its subsidiaries.",
+ "@Redfish.License": "Apache License, Version 2.0. For full text, see link: http://www.apache.org/licenses/LICENSE-2.0",
+ "SchemaDefinition": "RedfishInteroperabilityProfile.v1_5_1",
+ "ProfileName": "OpenStackIronicProfile",
+ "ProfileVersion": "1.0.0",
+ "Purpose": "Specifies the OpenStack Ironic vendor-independent Redfish service requirements, typically offered by a baseboard management controller (BMC).",
+ "OwningEntity": "Ironic community",
+ "ContactInfo": "openstack-discuss@lists.openstack.org",
+ "Protocol": {
+ "MinVersion": "1.6.0"
+ },
+ "Resources": {
+ "Bios": {
+ "ReadRequirement": "Recommended",
+ "PropertyRequirements": {
+ "Attributes": {}
+ },
+ "ActionRequirements": {
+ "ResetBios": {}
+ }
+ },
+ "ComputerSystem": {
+ "PropertyRequirements": {
+ "AssetTag": {
+ "ReadRequirement": "Recommended"
+ },
+ "Bios": {
+ "ReadRequirement": "Recommended"
+ },
+ "BiosVersion": {
+ "ReadRequirement": "Recommended"
+ },
+ "Boot": {
+ "PropertyRequirements": {
+ "BootSourceOverrideEnabled": {
+ "WriteRequirement": "Mandatory",
+ "MinSupportValues": [
+ "Disabled",
+ "Once",
+ "Continuous"
+ ]
+ },
+ "BootSourceOverrideMode": {
+ "WriteRequirement": "Mandatory",
+ "MinSupportValues": [
+ "UEFI"
+ ]
+ },
+ "BootSourceOverrideTarget": {
+ "WriteRequirement": "Mandatory",
+ "MinSupportValues": [
+ "Pxe",
+ "Hdd",
+ "Cd",
+ "BiosSetup"
+ ]
+ }
+ }
+ },
+ "IndicatorLED": {
+ "WriteRequirement": "Recommended",
+ "ReadRequirement": "Recommended",
+ "MinSupportValues": [
+ "Lit",
+ "Off",
+ "Blinking"
+ ]
+ },
+ "Links": {
+ "PropertyRequirements": {
+ "Chassis": {},
+ "ManagedBy": {}
+ }
+ },
+ "Manufacturer": {
+ "ReadRequirement": "Recommended"
+ },
+ "MemorySummary": {
+ "PropertyRequirements": {
+ "TotalSystemMemoryGiB": {}
+ }
+ },
+ "PowerState": {},
+ "Processors": {},
+ "SimpleStorage": {
+ "ReadRequirement": "Conditional",
+ "ConditionalRequirements": [
+ {
+ "Purpose": "Either SimpleStorage or Storage must be present, even if the system is disk-less.",
+ "CompareProperty": "Storage",
+ "CompareType": "Absent",
+ "ReadRequirement": "Mandatory"
+ }
+ ]
+ },
+ "Status": {
+ "PropertyRequirements": {
+ "Health": {},
+ "State": {}
+ }
+ },
+ "Storage": {
+ "ReadRequirement": "Conditional",
+ "ConditionalRequirements": [
+ {
+ "Purpose": "Either SimpleStorage or Storage must be present, even if the system is disk-less.",
+ "CompareProperty": "SimpleStorage",
+ "CompareType": "Absent",
+ "ReadRequirement": "Mandatory"
+ }
+ ]
+ },
+ "SystemType": {}
+ },
+ "ActionRequirements": {
+ "Reset": {
+ "Parameters": {
+ "ResetType": {
+ "ParameterValues": [
+ "On",
+ "ForceOff",
+ "GracefulShutdown",
+ "GracefulRestart",
+ "ForceRestart",
+ "Nmi"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "Drive": {
+ "ReadRequirement": "IfPopulated",
+ "Purpose": "Either SimpleStorage or Storage must be present, even if the system is disk-less.",
+ "PropertyRequirements": {
+ "CapacityBytes": {
+ "ReadRequirement": "IfPopulated"
+ },
+ "Status": {
+ "PropertyRequirements": {
+ "Health": {},
+ "State": {}
+ }
+ }
+ }
+ },
+ "EthernetInterface": {
+ "URIs": [
+ "/redfish/v1/Systems/{ComputerSystemId}/EthernetInterfaces/{EthernetInterfaceId}"
+ ],
+ "PropertyRequirements": {
+ "MACAddress": {},
+ "Status": {
+ "PropertyRequirements": {
+ "Health": {},
+ "State": {}
+ }
+ }
+ }
+ },
+ "Processor": {
+ "PropertyRequirements": {
+ "ProcessorArchitecture": {},
+ "Status": {
+ "PropertyRequirements": {
+ "Health": {},
+ "State": {}
+ }
+ },
+ "TotalThreads": {}
+ }
+ },
+ "SimpleStorage": {
+ "ReadRequirement": "IfPopulated",
+ "Purpose": "Either SimpleStorage or Storage must be present, even if the system is disk-less.",
+ "PropertyRequirements": {
+ "Devices": {
+ "PropertyRequirements": {
+ "CapacityBytes": {
+ "ReadRequirement": "IfPopulated"
+ },
+ "Status": {
+ "PropertyRequirements": {
+ "Health": {},
+ "State": {}
+ }
+ }
+ }
+ }
+ }
+ },
+ "VirtualMedia": {
+ "PropertyRequirements": {
+ "Image": {},
+ "Inserted": {
+ "ReadRequirement": "Recommended"
+ },
+ "MediaTypes": {
+ "ReadRequirement": "Recommended"
+ },
+ "WriteProtected": {
+ "ReadRequirement": "Recommended"
+ }
+ },
+ "ActionRequirements": {
+ "EjectMedia": {},
+ "InsertMedia": {
+ "Parameters": {
+ "Inserted": {
+ "ReadRequirement": "Recommended"
+ },
+ "WriteProtected": {
+ "ReadRequirement": "Recommended"
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/releasenotes/config.yaml b/releasenotes/config.yaml
new file mode 100644
index 000000000..26538010e
--- /dev/null
+++ b/releasenotes/config.yaml
@@ -0,0 +1,5 @@
+---
+# Ignore the kilo-eol tag because that branch does not work with reno
+# and contains no release notes.
+# Ignore bugfix tags because their releasenotes are covered under stable
+closed_branch_tag_re: 'r"(?!^(kilo-|bugfix-)).+-eol$"'
diff --git a/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml b/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml
new file mode 100644
index 000000000..46046bd2a
--- /dev/null
+++ b/releasenotes/notes/add-allocations-table-check-38f1c9eef189b411.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Adds an upgrade status check for the Allocation table engine and
+ character set encoding on MySQL. This is a result of a missing
+ encoding definition on the table schema when originally created.
+ This issue will be remedied, in part, in a later version of Ironic,
+ but the upgrade status check will provide advance operator visibility.
diff --git a/releasenotes/notes/add-node-inventory-7cde961b14caa11e.yaml b/releasenotes/notes/add-node-inventory-7cde961b14caa11e.yaml
new file mode 100644
index 000000000..93751e7d9
--- /dev/null
+++ b/releasenotes/notes/add-node-inventory-7cde961b14caa11e.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds API version ``1.81`` which enables fetching node inventory
+    which might have been stored during introspection.
diff --git a/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml b/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml
new file mode 100644
index 000000000..7a2aa7b95
--- /dev/null
+++ b/releasenotes/notes/add-service-role-support-8e9390769508ca99.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Adds support for the ``service`` role, which is intended for service to
+ service communication, such as for those where ``ironic-inspector``,
+ ``nova-compute``, or ``networking-baremetal`` needs to communicate with
+ Ironic's API.
+upgrade:
+ - |
+ Ironic now has support for the ``service`` role, which is available in the
+ ``system`` scope as well as the ``project`` scope. This functionality
+ is for service to service communication, if desired. Effective access rights
+ are similar to the ``manager`` or the ``owner`` scoped admin privileges.
diff --git a/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml b/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml
new file mode 100644
index 000000000..f98f2e607
--- /dev/null
+++ b/releasenotes/notes/additonal-snmp-drivers-ae1174e6bd6ee3a6.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds ``raritan_pdu2``, ``servertech_sentry3``, ``servertech_sentry4``,
+    and ``vertivgeist_pdu`` SNMP drivers to support additional PDU models.
diff --git a/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml b/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml
new file mode 100644
index 000000000..3db4da086
--- /dev/null
+++ b/releasenotes/notes/allocations-charset-5384d1ea00964bdd.yaml
@@ -0,0 +1,23 @@
+---
+fixes:
+ - |
+    Fixes a missing MySQL/MariaDB character set configuration and default
+    table type encoding for the ``allocations`` database table. Previously,
+    if Ironic's database was populated on a machine which
+    was using 4 byte character encoding, such as MySQL/MariaDB on Debian
+    based systems, then the database schema creation would fail.
+upgrade:
+  - This upgrade updates the default character set utilized in the
+ database tables when using MySQL/MariaDB. Previously, the default
+ for Ironic was ``UTF8``, however we now explicitly set ``UTF8MB3``
+ which is short for "3 byte UTF8" encoding. The exception to this
+ is the ``allocations`` table, which would just rely upon the database
+ default. This was done as Ironic's database schema is incompatible
+ with MySQL/MariaDB's ``UTF8MB4``, or "4 byte UTF8" character encoding
+ and storage constraints.
+  - Upgrading will change the default character encoding of all tables.
+ For most tables, this should be an effective noop, but may result in
+ transitory table locks. For the ``allocations`` table, it will need to
+ be re-written, during which the database engine will have locked the
+ table from being used. Operators are advised to perform test upgrades
+ and set expectation and upgrade plans accordingly.
diff --git a/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml b/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml
new file mode 100644
index 000000000..59d306c5d
--- /dev/null
+++ b/releasenotes/notes/anaconda-permit-cert-validation-disable-6611d3cb9401031d.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Adds a configuration option, ``[anaconda]insecure_heartbeat`` to allow
+ for TLS certificate validation to be disabled in the ``anaconda``
+    deployment interface, which is needed for continuous integration to
+ be able to be performed without substantial substrate image customization.
+ This option is *not* advised for any production usage.
diff --git a/releasenotes/notes/catch-all-cleaning-exceptions-1317a534a1c9db56.yaml b/releasenotes/notes/catch-all-cleaning-exceptions-1317a534a1c9db56.yaml
new file mode 100644
index 000000000..eb3cc61f9
--- /dev/null
+++ b/releasenotes/notes/catch-all-cleaning-exceptions-1317a534a1c9db56.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Fixes an issue where unexpected exceptions coming from the process to
+ start cleaning would not trigger the cleaning_error_handler which
+    performs the necessary internal resets to permit cleaning to be retried
+ again in the future. Now any error which is encountered during the
+ launch of cleaning will trigger the error handler.
diff --git a/releasenotes/notes/change-c9c01700dcfd599b.yaml b/releasenotes/notes/change-c9c01700dcfd599b.yaml
new file mode 100644
index 000000000..dbb3bb11e
--- /dev/null
+++ b/releasenotes/notes/change-c9c01700dcfd599b.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ Two statsd metrics names have been modified to provide structural clarity
+ and consistency for consumers of statistics metrics. Consumers of metrics
+ statistics may need to update their dashboards as the
+ ``post_clean_step_hook`` metric is now named
+ ``AgentBase.post_clean_step_hook``, and the ``post_deploy_step_hook`` is
+ now named ``AgentBase.post_deploy_step_hook``.
diff --git a/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml b/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml
new file mode 100644
index 000000000..270278f1b
--- /dev/null
+++ b/releasenotes/notes/cleaning-error-5c13c33c58404b97.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ When aborting cleaning, the ``last_error`` field is no longer initially
+ empty. It is now populated on the state transition to ``clean failed``.
+ - |
+ When cleaning or deployment fails, the ``last_error`` field is no longer
+    temporarily set to ``None`` while the power off action is running.
diff --git a/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml b/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml
new file mode 100644
index 000000000..5eb8dd449
--- /dev/null
+++ b/releasenotes/notes/concurrency-limit-control-4b101bca7136e08d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ Adds a concurrency limiter for number of nodes in states related to
+ *Cleaning* and *Provisioning* operations across the ironic deployment.
+ These settings default to a maximum number of concurrent deployments to
+ ``250`` and a maximum number of concurrent deletes and cleaning operations
+ to ``50``. These settings can be tuned using
+ ``[conductor]max_concurrent_deploy`` and
+ ``[conductor]max_concurrent_clean``, respectively.
+ The defaults should generally be good for most operators in most cases.
+ Large scale operators should evaluate the defaults and tune appropriately
+ as this feature cannot be disabled, as it is a security mechanism.
+upgrade:
+ - |
+ Large scale operators should be aware that a new feature, referred to as
+ "Concurrent Action Limit" was introduced as a security mechanism to
+ provide a means to limit attackers, or faulty scripts, from potentially
+    causing irreparable harm to an environment. This feature cannot be
+ disabled, and operators are encouraged to tune the new settings
+ ``[conductor]max_concurrent_deploy`` and
+ ``[conductor]max_concurrent_clean`` to match the needs of their
+ environment.
diff --git a/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml b/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml
new file mode 100644
index 000000000..dfa3b0f89
--- /dev/null
+++ b/releasenotes/notes/conductor-metric-collector-support-1b8b8c71f9f59da4.yaml
@@ -0,0 +1,39 @@
+---
+features:
+ - |
+ Adds the ability for Ironic to send conductor process metrics
+ for monitoring. This requires the use of a new ``[metrics]backend``
+ option value of ``collector``. This data was previously only available
+ through the use of statsd. This requires ``ironic-lib`` version ``5.4.0``
+ or newer. This capability can be disabled using the
+ ``[sensor_data]enable_for_conductor`` option if set to False.
+ - |
+ Adds a ``[sensor_data]enable_for_nodes`` configuration option
+ to allow operators to disable sending node metric data via the
+ message bus notifier.
+ - |
+ Adds a new gauge metric ``ConductorManager.PowerSyncNodesCount``
+    which tracks the nodes considered for power state synchronization.
+ - Adds a new gauge metric ``ConductorManager.PowerSyncRecoveryNodeCount``
+ which represents the number of nodes which are being evaluated for power
+ state recovery checking.
+ - Adds a new gauge metric ``ConductorManager.SyncLocalStateNodeCount``
+ which represents the number of nodes being tracked locally by the
+ conductor.
+issues:
+ - Sensor data notifications to the message bus, such as using the
+ ``[metrics]backend`` configuration option of ``collector`` on a dedicated
+ API service process or instance, is not presently supported. This
+ functionality requires a periodic task to trigger the transmission
+ of metrics messages to the message bus notifier.
+deprecations:
+ - The setting values starting with ``send_sensor`` in the ``[conductor]``
+ configuration group have been deprecated and moved to a ``[sensor_data]``
+ configuration group. The names have been updated to shorter, operator
+    friendly names.
+upgrade:
+  - Settings starting with ``send_sensor`` in the ``[conductor]``
+    configuration group have been moved to a ``[sensor_data]`` configuration
+    group and have been renamed to have shorter value names. If configuration
+    values are not updated, the ``oslo.config`` library will emit a warning
+    in the logs.
diff --git a/releasenotes/notes/console-pid-file-6108d2775ef947fe.yaml b/releasenotes/notes/console-pid-file-6108d2775ef947fe.yaml
new file mode 100644
index 000000000..427d04da8
--- /dev/null
+++ b/releasenotes/notes/console-pid-file-6108d2775ef947fe.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    Fixes an issue where, when a node has the console enabled but the pid
+    file is missing, the console could neither be disabled nor be
+    restarted, making the console feature unusable.
diff --git a/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml b/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml
new file mode 100644
index 000000000..10d270a45
--- /dev/null
+++ b/releasenotes/notes/correct-source-path-handling-lookups-4ce2023a56372f10.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ Fixes an issue where image information retrieval would fail when a
+ path was supplied when using the ``anaconda`` deploy interface,
+ as `HTTP` ``HEAD`` requests on a URL path have no ``Content-Length``.
+ We now consider if a path is used prior to attempting to collect
+ additional configuration data from what is normally expected to
+ be Glance.
+ - |
+ Fixes an issue where the fallback to a default kickstart template
+ value would result in error indicating
+ "Scheme-less image href is not a UUID".
+    This was because the handling code falling back to the default
+ did not explicitly indicate it was a file URL before saving the
+ value.
diff --git a/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml b/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml
new file mode 100644
index 000000000..1951245c1
--- /dev/null
+++ b/releasenotes/notes/create_csr_clean_step-a720932f61b42118.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds new clean steps ``create_csr`` and ``add_https_certificate``
+ to ``ilo`` and ``ilo5`` hardware types which allows users to
+ create Certificate Signing Request(CSR) and adds signed HTTPS
+ certificate to the iLO.
diff --git a/releasenotes/notes/dnsmasq_dhcp-9154fcae927dc3de.yaml b/releasenotes/notes/dnsmasq_dhcp-9154fcae927dc3de.yaml
new file mode 100644
index 000000000..bbf7dad40
--- /dev/null
+++ b/releasenotes/notes/dnsmasq_dhcp-9154fcae927dc3de.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The ``[dhcp]dhcp_provider`` configuration option can now be set to
+ ``dnsmasq`` as an alternative to ``none`` for standalone deployments. This
+ enables the same node-specific DHCP capabilities as the ``neutron`` provider.
+ See the ``[dnsmasq]`` section for configuration options. \ No newline at end of file
diff --git a/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml b/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml
new file mode 100644
index 000000000..fe02d33ff
--- /dev/null
+++ b/releasenotes/notes/fakedelay-7eac23ad8881a736.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ There are now configurable random wait times for fake drivers in a new
+ ironic.conf [fake] section. Each supported driver having one configuration
+ option controlling the delay. These delays are applied to operations which
+ typically block in other drivers. This allows more realistic scenarios to
+ be arranged for performance and functional testing of ironic itself.
diff --git a/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml b/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml
new file mode 100644
index 000000000..f7769afc1
--- /dev/null
+++ b/releasenotes/notes/fix-cleaning-stuck-on-networkerror-4aedbf3673413af6.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ Fixes an issue where cleaning operations could fail in such a way that was
+ not easily recoverable when pre-cleaning network interface configuration
+ was validated, yet contained invalid configuration.
+ Now Ironic properly captures the error and exits from cleaning in a
+ state which allows for cleaning to be retried.
diff --git a/releasenotes/notes/fix-console-port-conflict-6dc19688079e2c7f.yaml b/releasenotes/notes/fix-console-port-conflict-6dc19688079e2c7f.yaml
new file mode 100644
index 000000000..32b419915
--- /dev/null
+++ b/releasenotes/notes/fix-console-port-conflict-6dc19688079e2c7f.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+    Fixes an issue where the auto-allocated console port could conflict on
+    the same host under certain circumstances related to conductor takeover.
+
+ For more information, see `story 2010489
+ <https://storyboard.openstack.org/#!/story/2010489>`_.
diff --git a/releasenotes/notes/fix-context-image-hardlink-16f452974abc7327.yaml b/releasenotes/notes/fix-context-image-hardlink-16f452974abc7327.yaml
new file mode 100644
index 000000000..90d38d5cc
--- /dev/null
+++ b/releasenotes/notes/fix-context-image-hardlink-16f452974abc7327.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue where if selinux is enabled and enforcing, and
+ the published image is a hardlink, the source selinux context
+ is preserved, causing access denied when retrieving the image
+ using hardlink URL.
diff --git a/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml b/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml
new file mode 100644
index 000000000..ddb6c86cb
--- /dev/null
+++ b/releasenotes/notes/fix-grub2-uefi-config-path-f1b4c5083cc97ee5.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ Fixes the default value for the ``[DEFAULT]grub_config_path`` variable to
+    be the default path for UEFI bootloader configurations, whereas the
+ default was previously the BIOS grub2 configuration path.
+upgrades:
+ - |
+ The default configuration value for ``[DEFAULT]grub_config_path`` has
+ been changed from ``/boot/grub/grub.conf`` to ``EFI/BOOT/grub.efi`` as
+ the configuration parameter was for UEFI boot configuration, and the
+ ``/boot/grub/grub2.conf`` path is for BIOS booting. This was verified
+ by referencing several working UEFI virtual media examples where this
+ value was overridden to the new configuration value.
diff --git a/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml b/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml
new file mode 100644
index 000000000..bf476dd63
--- /dev/null
+++ b/releasenotes/notes/fix-idrac-redfish-controller-mode-7b55c58d09240d3c.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes ``idrac-redfish`` RAID ``delete_configuration`` step to convert PERC
+ 9 and PERC 10 controllers to RAID mode if it is not already set.
diff --git a/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml b/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml
new file mode 100644
index 000000000..586ea6b82
--- /dev/null
+++ b/releasenotes/notes/fix-ilo-boot-interface-order-238a2da9933cf28c.yaml
@@ -0,0 +1,26 @@
+---
+fixes:
+ - |
+ Fixes the default boot interface order for the ``ilo`` hardware type
+ where previously it would prefer ``pxe`` over ``ipxe``. This created
+ inconsistencies for operators using multiple hardware types, where
+ both interfaces were enabled in the deployment.
+upgrade:
+ - |
+ Operators who are upgrading should be aware that a bug was discovered
+ with the automatic selection of ``boot_interface`` for users of the
+ ``ilo`` and ``ilo5`` hardware types. This was an inconsistency,
+ resulting in ``pxe`` being selected instead of ``ipxe`` if both
+ boot interfaces were enabled. Depending on the local configuration,
+ this may, or may not have happened and will remain static on
+ preexisting baremetal nodes. Some users may have been relying
+    upon this incorrect behavior by having misaligned defaults by trying
+ to use the ``pxe`` interface for ``ipxe``. Users wishing to continue
+ this usage as it was previously will need to explicitly set a
+ ``boot_interface`` value to either ``pxe`` or ``ilo-ipxe`` by default,
+ depending on the local configuration. Most operators have leveraged
+ the default examples, and thus did not explicitly encounter this
+ condition. Operators explicitly wishing to use ``pxe`` boot interfaces
+ with the ``ipxe`` templates and defaults set to override the defaults
+    for ``ironic.conf`` will need to continue to leverage default
+    override configurations in their ``ironic.conf`` file.
diff --git a/releasenotes/notes/fix-inspectwait-finished-at-4b817af4bf4c30c2.yaml b/releasenotes/notes/fix-inspectwait-finished-at-4b817af4bf4c30c2.yaml
new file mode 100644
index 000000000..167a7f4a5
--- /dev/null
+++ b/releasenotes/notes/fix-inspectwait-finished-at-4b817af4bf4c30c2.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes a database API internal check to update the
+ ``inspection_finished_at`` field upon the completion of inspection.
diff --git a/releasenotes/notes/fix-irmc-s6-2.00-http-incompatibility-61a31d12aa33fbd8.yaml b/releasenotes/notes/fix-irmc-s6-2.00-http-incompatibility-61a31d12aa33fbd8.yaml
new file mode 100644
index 000000000..f6e91c1ab
--- /dev/null
+++ b/releasenotes/notes/fix-irmc-s6-2.00-http-incompatibility-61a31d12aa33fbd8.yaml
@@ -0,0 +1,19 @@
+---
+upgrade:
+ - |
+ Since iRMC versions S6 2.00 and later, iRMC firmware doesn't
+ support HTTP connection to REST API. Operators need to set
+ ``[irmc] port`` in ironic.conf or ``driver_info/irmc_port``
+ to 443.
+features:
+ - |
+ Adds verify step and node vendor passthru method to deal with
+ a firmware incompatibility issue with iRMC versions S6 2.00
+ and later in which HTTP connection to REST API is not supported
+    and HTTPS connections to REST API are required.
+
+ Verify step checks connection to iRMC REST API and if connection
+    succeeds, it fetches the version of iRMC firmware and stores it in
+ ``driver_internal_info/irmc_fw_version``. Ironic operators use
+ node vendor passthru method to fetch & update iRMC firmware
+ version cached in ``driver_internal_info/irmc_fw_version``.
diff --git a/releasenotes/notes/fix-irmc-s6-2.00-ipmi-incompatibility-118484a424df02b1.yaml b/releasenotes/notes/fix-irmc-s6-2.00-ipmi-incompatibility-118484a424df02b1.yaml
new file mode 100644
index 000000000..4e4875f2c
--- /dev/null
+++ b/releasenotes/notes/fix-irmc-s6-2.00-ipmi-incompatibility-118484a424df02b1.yaml
@@ -0,0 +1,15 @@
+---
+fixes:
+ - |
+    Fixes a firmware incompatibility issue with iRMC versions S6 2.00
+    and later, which no longer support IPMI over LAN by default.
+ To deal with this problem, irmc driver first tries IPMI operation then,
+ if IPMI operation fails, it tries Redfish API of Fujitsu server.
+ The operator must set Redfish parameters in the ``driver_info``
+    if iRMC disables or doesn't support IPMI over LAN.
+upgrade:
+ - |
+ When Ironic operator uses irmc driver against Fujitsu server which runs
+ iRMC version S6 2.00 or later, operator may need to set Redfish parameters
+ in ``driver_info`` so this fix can operate properly or operator should
+ enable IPMI over LAN through BMC settings, if possible.
diff --git a/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml b/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml
new file mode 100644
index 000000000..ec9043adb
--- /dev/null
+++ b/releasenotes/notes/fix-nonetype-object-is-not-iterable-0592926d890d6c11.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes ``'NoneType' object is not iterable`` in conductor logs for
+ ``redfish`` and ``idrac-redfish`` RAID clean and deploy steps. The message
+ should no longer appear. For affected nodes re-create the node or delete
+ ``raid_configs`` entry from ``driver_internal_info`` field.
diff --git a/releasenotes/notes/fix_anaconda-70f4268edc255ff4.yaml b/releasenotes/notes/fix_anaconda-70f4268edc255ff4.yaml
new file mode 100644
index 000000000..3882a2820
--- /dev/null
+++ b/releasenotes/notes/fix_anaconda-70f4268edc255ff4.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes the URL based anaconda deployment for parsing the given ``image_source``
+ url.
diff --git a/releasenotes/notes/fix_anaconda_pxe-6c75d42872424fec.yaml b/releasenotes/notes/fix_anaconda_pxe-6c75d42872424fec.yaml
new file mode 100644
index 000000000..ecdc3468c
--- /dev/null
+++ b/releasenotes/notes/fix_anaconda_pxe-6c75d42872424fec.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes URL based anaconda deploy to work in pxe boot. It also enables
+ grub based pxe anaconda deploy which is required for ``ilo`` hardware
+ type.
diff --git a/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml b/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml
new file mode 100644
index 000000000..fcfc515e4
--- /dev/null
+++ b/releasenotes/notes/ilo-event-subscription-0dadf136411bd16a.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Provides vendor passthru methods for ``ilo`` and ``ilo5`` hardware types
+ to create, delete and get subscriptions for BMC events. These methods are
+ supported for ``HPE ProLiant Gen10`` and ``HPE ProLiant Gen10 Plus``
+ servers.
diff --git a/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml b/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml
new file mode 100644
index 000000000..4d0c6bff2
--- /dev/null
+++ b/releasenotes/notes/irmc-add-snmp-auth-protocols-3ff7597cea7ef9dd.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Adds ``sha256``, ``sha384`` and ``sha512`` as supported SNMPv3
+ authentication protocols to iRMC driver.
diff --git a/releasenotes/notes/irmc-align-with-ironic-default-boot-mode-dde6f65ea084c9e6.yaml b/releasenotes/notes/irmc-align-with-ironic-default-boot-mode-dde6f65ea084c9e6.yaml
new file mode 100644
index 000000000..2e4b5ba60
--- /dev/null
+++ b/releasenotes/notes/irmc-align-with-ironic-default-boot-mode-dde6f65ea084c9e6.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Modify iRMC driver to use ironic.conf [deploy] default_boot_mode to determine
+ default boot_mode.
diff --git a/releasenotes/notes/irmc-change-boot-interface-order-e76f5018da116a90.yaml b/releasenotes/notes/irmc-change-boot-interface-order-e76f5018da116a90.yaml
new file mode 100644
index 000000000..3ad35a07c
--- /dev/null
+++ b/releasenotes/notes/irmc-change-boot-interface-order-e76f5018da116a90.yaml
@@ -0,0 +1,26 @@
+---
+fixes:
+ - |
+ Fixes the default boot interface order for the ``irmc`` hardware type
+ where previously it would prefer ``irmc-pxe`` over ``ipxe``. This
+ created inconsistencies for operators using multiple hardware types,
+ where both interfaces were enabled in the deployment.
+upgrade:
+ - |
+ Operators who are upgrading should be aware that a bug was discovered
+ with the automatic selection of ``boot_interface`` for users of the
+ ``irmc`` hardware types. This was an inconsistency, resulting in
+ ``irmc-pxe`` being selected instead of ``ipxe`` if these boot
+ interfaces were enabled. Depending on the local configuration,
+ this may, or may not have happened and will remain static on
+ preexisting baremetal nodes. Some users may have been relying upon
+    this incorrect behavior by having mis-aligned defaults by trying to
+ use the ``irmc-pxe`` interface for ``ipxe``. Users wishing to continue
+ this usage as it was previously will need to explicitly set a
+ ``boot_interface`` value to either ``pxe`` or ``irmc-pxe``, depending
+ on the local configuration. Most operators have leveraged the default
+ examples, and thus did not explicitly encounter this condition.
+ Operators explicitly wishing to use ``pxe`` boot interfaces with
+ the ``ipxe`` templates and defaults set to override the defaults
+    for ``ironic.conf`` will need to continue to leverage default
+    override configurations in their ``ironic.conf`` file.
diff --git a/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml b/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml
new file mode 100644
index 000000000..75c0a6c50
--- /dev/null
+++ b/releasenotes/notes/jsonschema-4.8-1146d103b877cffd.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes API error messages with jsonschema>=4.8. A possible root cause is
+ now detected for generic schema errors.
diff --git a/releasenotes/notes/lockutils-default-logging-8c38b8c0ac71043f.yaml b/releasenotes/notes/lockutils-default-logging-8c38b8c0ac71043f.yaml
new file mode 100644
index 000000000..6ef3fd546
--- /dev/null
+++ b/releasenotes/notes/lockutils-default-logging-8c38b8c0ac71043f.yaml
@@ -0,0 +1,8 @@
+---
+other:
+ - |
+    The default logging level for the ``oslo_concurrency.lockutils``
+ module logging has been changed to ``WARNING``. By default, the debug
+ logging was resulting in lots of noise. Operators wishing to view debug
+    logging for this module can utilize the ``[DEFAULT]default_log_levels``
+ configuration option.
diff --git a/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml b/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml
new file mode 100644
index 000000000..f09421593
--- /dev/null
+++ b/releasenotes/notes/maximum-disk-erasure-concurrency-6d132bd84e3df4cf.yaml
@@ -0,0 +1,10 @@
+---
+other:
+ - |
+ The maximum disk erasure concurrency setting,
+    ``[deploy]disk_erasure_concurrency`` has been increased to 4.
+ Previously, this was kept at 1 in order to maintain continuity of
+ experience, but operators have not reported any issues with an increased
+ concurrency, and as such we feel comfortable upstream enabling concurrent
+ disk erasure/cleaning. This setting applies to the ``erase_devices`` clean
+ step.
diff --git a/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml b/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml
new file mode 100644
index 000000000..b405dddb3
--- /dev/null
+++ b/releasenotes/notes/node-creation-no-longer-scope-restricted-b455f66a751f10ec.yaml
@@ -0,0 +1,27 @@
+---
+features:
+ - |
+ Adds the capability for a project scoped ``admin`` user to be able to
+ create nodes in Ironic, which are then manageable by the project scoped
+ ``admin`` user. Effectively, this is self service Bare Metal as a Service,
+    however more advanced fields such as drivers, chassis, are not available
+ to these users. This is controlled through an auto-population of the
+ Node ``owner`` field, and can be controlled through the
+ ``[api]project_admin_can_manage_own_nodes`` setting, which defaults to
+ ``True``, and the new policy ``baremetal:node:create:self_owned_node``.
+ - |
+ Adds the capability for a project scoped ``admin`` user to be able to
+ delete nodes from Ironic which their `project` owns. This can be
+    controlled through the ``[api]project_admin_can_manage_own_nodes``
+ setting, which defaults to ``True``, as well as the
+ ``baremetal:node:delete:self_owned_node`` policy.
+security:
+ - |
+ This release contains an improvement which, by default, allows users to
+ create and delete baremetal nodes inside their own project. This can be
+ disabled using the ``[api]project_admin_can_manage_own_nodes`` setting.
+upgrade:
+ - |
+ The API version has been increased to ``1.80`` in order to signify
+    the addition of additional Role Based Access Controls capabilities
+ around node creation and deletion. \ No newline at end of file
diff --git a/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml b/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml
new file mode 100644
index 000000000..5174f09e4
--- /dev/null
+++ b/releasenotes/notes/prepare-for-sqlalchemy-20-e817f340f261b1a2.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Ironic has started the process of upgrading the code base to support
+    SQLAlchemy 2.0 in anticipation of its release. This results in the
+ minimum version of SQLAlchemy becoming 1.4.0 as it contains migration
+ features for the move to SQLAlchemy 2.0.
diff --git a/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml b/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml
new file mode 100644
index 000000000..af48b88fa
--- /dev/null
+++ b/releasenotes/notes/redfish_consider_password_in_session_cache-1fa84234db179053.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes an issue where the Redfish session cache would continue using an
+ old session when a password for a Redfish BMC was changed. Now the old
+ session will not be found in this case, and a new session will be created
+ with the latest credential information available.
diff --git a/releasenotes/notes/shard-support-a26f8d2ab5cca582.yaml b/releasenotes/notes/shard-support-a26f8d2ab5cca582.yaml
new file mode 100644
index 000000000..10104bf91
--- /dev/null
+++ b/releasenotes/notes/shard-support-a26f8d2ab5cca582.yaml
@@ -0,0 +1,14 @@
+features:
+ - Adds support for setting a shard key on a node, and filtering node or port
+ lists by shard. This shard key is not used for any purpose internally in
+ Ironic, but instead is intended to allow API clients to filter for a
+ subset of nodes or ports. Being able to fetch only a subset of nodes or
+ ports is useful for parallelizing any operational task that needs to be
+ performed across all nodes or ports.
+ - Adds support for querying for nodes which are sharded or unsharded. This
+ is useful for allowing operators to find nodes which have not been
+ assigned a shard key.
+ - Adds support for querying for a list of shards via ``/v1/shards``. This
+ endpoint will return a list of currently assigned shard keys as well as
+    the count of nodes which have those keys assigned. Using this API endpoint,
+ operators can see a high level listing of how their nodes are sharded.
diff --git a/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml b/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
index df9bef955..a829cbd97 100644
--- a/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
+++ b/releasenotes/notes/skip-clear-job-queue-idrac-reset-if-attr-missing-b2a2b609c906c6c4.yaml
@@ -1,8 +1,8 @@
---
fixes:
- |
- Resolved clear_job_queue and reset_idrac verify step failures which occur
- when the functionality is not supported by the iDRAC. When this condition
- is detected, the code in the step handles the exception and logs a warning
- and completes successfully in case of verification steps but fails in case
- of cleaning steps.
+ Resolved ``clear_job_queue`` and ``reset_idrac`` verify step failures which
+ occur when the functionality is not supported by the iDRAC. When this
+ condition is detected, the code in the step handles the exception and logs
+ a warning and completes successfully in case of verification steps but
+ fails in case of cleaning steps.
diff --git a/releasenotes/notes/wait_hash_ring_reset-ef8bd548659e9906.yaml b/releasenotes/notes/wait_hash_ring_reset-ef8bd548659e9906.yaml
new file mode 100644
index 000000000..cea3e28f3
--- /dev/null
+++ b/releasenotes/notes/wait_hash_ring_reset-ef8bd548659e9906.yaml
@@ -0,0 +1,13 @@
+---
+fixes:
+ - |
+ When a conductor service is stopped it will now continue to respond to RPC
+ requests until ``[DEFAULT]hash_ring_reset_interval`` has elapsed, allowing
+ a hash ring reset to complete on the cluster after conductor is
+ unregistered. This will improve the reliability of the cluster when scaling
+ down or rolling out updates.
+
+ This delay only occurs when there is more than one online conductor,
+ to allow fast restarts on single-node ironic installs (bifrost,
+ metal3).
+
diff --git a/releasenotes/notes/zed-prelude-09fe95b11ad2459d.yaml b/releasenotes/notes/zed-prelude-09fe95b11ad2459d.yaml
new file mode 100644
index 000000000..5b0b43848
--- /dev/null
+++ b/releasenotes/notes/zed-prelude-09fe95b11ad2459d.yaml
@@ -0,0 +1,12 @@
+---
+prelude: >
+ The Ironic team hereby announces the release of the `Zed` version of
+ Ironic. This version, *21.1.0*, represents the collaboration of Ironic's
+ contributors during the `Zed` release cycle, which first saw the release
+ of Ironic *20.2.0*, and Ironic *21.1.0*. These versions saw improvements
+ in functionality to better support infrastructure operators from the
+ configuration of individual nodes, to support a greater separation
+ of duties, and ultimately Self-Service Bare Metal as a Service, or
+ "SSBMaaS". Along with these features, these releases have seen numerous
+ bug fixes. We sincerely hope you enjoy it!
+
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 8e7c63864..107450a67 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ zed
yoga
xena
wallaby
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index f5f519da5..6878c43c6 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -3,15 +3,16 @@
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Ironic Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-07-06 13:27+0000\n"
+"POT-Creation-Date: 2023-02-01 23:20+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-07-24 12:21+0000\n"
+"PO-Revision-Date: 2023-02-03 04:37+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -136,9 +137,6 @@ msgstr "10.1.8"
msgid "10.1.9"
msgstr "10.1.9"
-msgid "11.0.0"
-msgstr "11.0.0"
-
msgid "11.1.0"
msgstr "11.1.0"
@@ -157,9 +155,6 @@ msgstr "11.1.4"
msgid "11.1.4-12"
msgstr "11.1.4-12"
-msgid "12.0.0"
-msgstr "12.0.0"
-
msgid "12.1.0"
msgstr "12.1.0"
@@ -184,9 +179,6 @@ msgstr "12.1.6"
msgid "12.1.6-3"
msgstr "12.1.6-3"
-msgid "12.2.0"
-msgstr "12.2.0"
-
msgid "13.0.0"
msgstr "13.0.0"
@@ -211,8 +203,8 @@ msgstr "13.0.6"
msgid "13.0.7"
msgstr "13.0.7"
-msgid "13.0.7-24"
-msgstr "13.0.7-24"
+msgid "13.0.7-29"
+msgstr "13.0.7-29"
msgid "14.0.0"
msgstr "14.0.0"
@@ -226,8 +218,8 @@ msgstr "15.0.1"
msgid "15.0.2"
msgstr "15.0.2"
-msgid "15.0.2-16"
-msgstr "15.0.2-16"
+msgid "15.0.2-25"
+msgstr "15.0.2-25"
msgid "15.1.0"
msgstr "15.1.0"
@@ -253,6 +245,9 @@ msgstr "16.0.4"
msgid "16.0.5"
msgstr "16.0.5"
+msgid "16.0.5-11"
+msgstr "16.0.5-11"
+
msgid "16.1.0"
msgstr "16.1.0"
@@ -271,8 +266,11 @@ msgstr "17.0.3"
msgid "17.0.4"
msgstr "17.0.4"
-msgid "17.0.4-27"
-msgstr "17.0.4-27"
+msgid "17.1.0"
+msgstr "17.1.0"
+
+msgid "17.1.0-6"
+msgstr "17.1.0-6"
msgid "18.0.0"
msgstr "18.0.0"
@@ -286,8 +284,11 @@ msgstr "18.2.0"
msgid "18.2.1"
msgstr "18.2.1"
-msgid "18.2.1-17"
-msgstr "18.2.1-17"
+msgid "18.2.2"
+msgstr "18.2.2"
+
+msgid "18.2.2-6"
+msgstr "18.2.2-6"
msgid "19.0.0"
msgstr "19.0.0"
@@ -298,18 +299,45 @@ msgstr "20.0.0"
msgid "20.1.0"
msgstr "20.1.0"
-msgid "20.1.0-12"
-msgstr "20.1.0-12"
+msgid "20.1.1"
+msgstr "20.1.1"
+
+msgid "20.1.1-6"
+msgstr "20.1.1-6"
msgid "20.2.0"
msgstr "20.2.0"
-msgid "20.2.0-21"
-msgstr "20.2.0-21"
+msgid "21.0.0"
+msgstr "21.0.0"
+
+msgid "21.1.0"
+msgstr "21.1.0"
+
+msgid "21.1.0-6"
+msgstr "21.1.0-6"
+
+msgid "21.2.0"
+msgstr "21.2.0"
+
+msgid "21.3.0"
+msgstr "21.3.0"
+
+msgid "21.3.0-4"
+msgstr "21.3.0-4"
msgid "4.0.0 First semver release"
msgstr "4.0.0 First semver release"
+msgid "4.1.0"
+msgstr "4.1.0"
+
+msgid "4.2.0"
+msgstr "4.2.0"
+
+msgid "4.2.1"
+msgstr "4.2.1"
+
msgid "4.2.2"
msgstr "4.2.2"
@@ -514,6 +542,15 @@ msgstr ""
"masked for this request."
msgid ""
+"A driver that handles booting itself (for example, a driver that implements "
+"booting from virtual media) should use the following to make calls to the "
+"boot interface a no-op::"
+msgstr ""
+"A driver that handles booting itself (for example, a driver that implements "
+"booting from virtual media) should use the following to make calls to the "
+"boot interface a no-op::"
+
+msgid ""
"A few major changes are worth mentioning. This is not an exhaustive list, "
"and mostly includes changes from 9.0.0:"
msgstr ""
@@ -530,21 +567,6 @@ msgstr "A few major changes since 9.1.x (Pike) are worth mentioning:"
msgid ""
"A future release will change the default value of ``[deploy]/"
-"default_boot_mode`` from \"bios\" to \"uefi\". It is recommended to set an "
-"explicit value for this option. For hardware types which don't support "
-"setting boot mode, a future release will assume boot mode is set to UEFI if "
-"no boot mode is set to node's capabilities. It is also recommended to set "
-"``boot_mode`` into ``properties/capabilities`` of a node."
-msgstr ""
-"A future release will change the default value of ``[deploy]/"
-"default_boot_mode`` from \"bios\" to \"uefi\". It is recommended to set an "
-"explicit value for this option. For hardware types which don't support "
-"setting boot mode, a future release will assume boot mode is set to UEFI if "
-"no boot mode is set to node's capabilities. It is also recommended to set "
-"``boot_mode`` into ``properties/capabilities`` of a node."
-
-msgid ""
-"A future release will change the default value of ``[deploy]/"
"default_boot_option`` from \"netboot\" to \"local\". To avoid disruptions, "
"it is recommended to set an explicit value for this option."
msgstr ""
@@ -613,6 +635,13 @@ msgstr ""
"mod_wsgi)."
msgid ""
+"A new class ``ironic.drivers.modules.agent.CustomAgentDeploy`` can be used "
+"as a base class for deploy interfaces based on ironic-python-agent."
+msgstr ""
+"A new class ``ironic.drivers.modules.agent.CustomAgentDeploy`` can be used "
+"as a base class for deploying interfaces based on ironic-python-agent."
+
+msgid ""
"A new configuration option ``[agent]require_tls`` allows rejecting ramdisk "
"callback URLs that don't use the ``https://`` schema."
msgstr ""
@@ -710,6 +739,15 @@ msgstr ""
"for the node, merely recording the returned state instead."
msgid ""
+"A new option ``[agent]api_ca_file`` allows passing a CA file to the ramdisk "
+"when ``redfish-virtual-media`` boot is used. Requires ironic-python-agent "
+"from the Wallaby cycle."
+msgstr ""
+"A new option ``[agent]api_ca_file`` allows passing a CA file to the ramdisk "
+"when ``redfish-virtual-media`` boot is used. Requires ironic-python-agent "
+"from the Wallaby cycle."
+
+msgid ""
"A node in the ``active`` provision state can be rescued via the ``GET /v1/"
"nodes/{node_ident}/states/provision`` API, by specifying ``rescue`` as the "
"``target`` value, and a ``rescue_password`` value. When the node has been "
@@ -766,6 +804,21 @@ msgstr ""
msgid ""
"A permission setting has been added for ``redfish-virtual-media`` boot "
"interface, which allows for explicit file permission setting when the driver "
+"is being used. The default for the new ``[redfish]file_permission setting is "
+"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
+"Operators MAY need to adjust this if they were running the conductor with a "
+"specific ``umask`` to work around the permission setting defect."
+msgstr ""
+"A permission setting has been added for ``redfish-virtual-media`` boot "
+"interface, which allows for explicit file permission setting when the driver "
+"is being used. The default for the new ``[redfish]file_permission setting is "
+"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
+"Operators MAY need to adjust this if they were running the conductor with a "
+"specific ``umask`` to work around the permission setting defect."
+
+msgid ""
+"A permission setting has been added for ``redfish-virtual-media`` boot "
+"interface, which allows for explicit file permission setting when the driver "
"is used. The default for the new ``[redfish]file_permission setting is "
"``0u644``, or 644 if manually changed using ``chmod`` on the command line. "
"Operators may need to check ``/httpboot/redfish`` folder permissions if "
@@ -815,10 +868,6 @@ msgstr ""
"Driver needs this verification because the machine is going to use a MAC "
"that will only be specified at the profile application."
-msgid "A warning is logged for any changes to immutable configuration options."
-msgstr ""
-"A warning is logged for any changes to immutable configuration options."
-
msgid "API fields to support node ``description`` and ``owner`` values."
msgstr "API fields to support node ``description`` and ``owner`` values."
@@ -830,18 +879,25 @@ msgstr ""
"net/ironic/+bug/1536828 for details."
msgid ""
-"API version 1.57 adds a REST API endpoint for updating an existing "
-"allocation. Only ``name`` and ``extra`` fields are allowed to be updated."
-msgstr ""
-"API version 1.57 adds a REST API endpoint for updating an existing "
-"allocation. Only ``name`` and ``extra`` fields are allowed to be updated."
-
-msgid ""
-"API version 1.58 allows backfilling allocations for existing deployed nodes "
-"by providing ``node`` to ``POST /v1/allocations``."
-msgstr ""
-"API version 1.58 allows backfilling allocations for existing deployed nodes "
-"by providing ``node`` to ``POST /v1/allocations``."
+"Ability to create an allocation has been restricted by a new policy rule "
+"``baremetal::allocation::create_pre_rbac`` which prevents creation of "
+"allocations by any project administrator when operating with the new Role "
+"Based Access Control model. The use and enforcement of this rule is disabled "
+"when ``[oslo_policy]enforce_new_defaults`` is set which also makes the "
+"population of a ``owner`` field for allocations to become automatically "
+"populated. Most deployments should not encounter any issues with this "
+"security change, and the policy rule will be removed when support for the "
+"legacy ``baremetal_admin`` custom role has been removed."
+msgstr ""
+"Ability to create an allocation has been restricted by a new policy rule "
+"``baremetal::allocation::create_pre_rbac`` which prevents creation of "
+"allocations by any project administrator when operating with the new Role "
+"Based Access Control model. The use and enforcement of this rule is disabled "
+"when ``[oslo_policy]enforce_new_defaults`` is set which also makes the "
+"population of a ``owner`` field for allocations to become automatically "
+"populated. Most deployments should not encounter any issues with this "
+"security change, and the policy rule will be removed when support for the "
+"legacy ``baremetal_admin`` custom role has been removed."
msgid "Add BIOS config to DRAC Driver"
msgstr "Add BIOS config to DRAC Driver"
@@ -868,13 +924,15 @@ msgid "Add Wake-On-Lan Power Driver"
msgstr "Add Wake-On-LAN Power Driver"
msgid ""
-"Add ``?detail=`` boolean query to the API list endpoints to provide a more "
-"RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. "
-"The default is False. Now these API requests are possible:"
+"Add ``anaconda`` deploy interface to Ironic. This driver will deploy the OS "
+"using anaconda installer and kickstart file instead of IPA. To support this "
+"feature a new configuration group ``anaconda`` is added to Ironic "
+"configuration file along with ``default_ks_template`` configuration option."
msgstr ""
-"Add ``?detail=`` boolean query to the API list endpoints to provide a more "
-"RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. "
-"The default is False. Now these API requests are possible:"
+"Add ``anaconda`` deploy interface to Ironic. This driver will deploy the OS "
+"using Anaconda installer and kickstart file instead of IPA. To support this "
+"feature a new configuration group ``anaconda`` is added to Ironic "
+"configuration file along with ``default_ks_template`` configuration option."
msgid ""
"Add ``choices`` parameter to config options. Invalid values will be rejected "
@@ -981,6 +1039,9 @@ msgstr ""
msgid "Added CORS support"
msgstr "Added CORS support"
+msgid "Added Cisco IMC driver"
+msgstr "Added Cisco IMC driver"
+
msgid ""
"Added configdrive support for whole disk images for iSCSI based deploy. This "
"will work for UEFI only or BIOS only images. It will not work for hybrid "
@@ -1018,6 +1079,24 @@ msgstr ""
"validate iLO SSL certificates."
msgid ""
+"Adding ``kernel`` and ``ramdisk`` is no longer necessary for partition "
+"images if ``image_type`` is set to ``partition`` and local boot is used."
+msgstr ""
+"Adding ``kernel`` and ``ramdisk`` is no longer necessary for partition "
+"images if ``image_type`` is set to ``partition`` and local boot is used."
+
+msgid ""
+"Adding new clean steps to ``ilo`` and ``ilo5`` hardware type - "
+"``security_parameters_update``, ``update_minimum_password_length``, and "
+"``update_auth_failure_logging_threshold`` which allows users to modify ilo "
+"system security settings."
+msgstr ""
+"Adding new clean steps to ``ilo`` and ``ilo5`` hardware type - "
+"``security_parameters_update``, ``update_minimum_password_length``, and "
+"``update_auth_failure_logging_threshold`` which allows users to modify ilo "
+"system security settings."
+
+msgid ""
"Addition of the provision state target verb of ``adopt`` which allows an "
"operator to move a node into an ``active`` state from ``manageable`` state, "
"without performing a deployment operation on the node. This can be used to "
@@ -1036,6 +1115,15 @@ msgid "Additionally, adds the following API changes:"
msgstr "Additionally, adds the following API changes:"
msgid ""
+"Additionally, as mentioned before, `ironic.drivers.modules.pxe.PXEDeploy` "
+"has moved to `ironic.drivers.modules.iscsi_deploy.ISCSIDeploy`, which will "
+"break drivers that use this class."
+msgstr ""
+"Additionally, as mentioned before, `ironic.drivers.modules.pxe.PXEDeploy` "
+"has moved to `ironic.drivers.modules.iscsi_deploy.ISCSIDeploy`, which will "
+"break drivers that use this class."
+
+msgid ""
"Addresses a condition where the Compute Service may have been unable to "
"remove VIF attachment records while a baremetal node is being unprovisiond. "
"This condition resulted in VIF records being orphaned, blocking future "
@@ -1119,15 +1207,6 @@ msgstr ""
"udp_transport_timeout`` allow to change the number of retries and the "
"timeout values respectively for the the SNMP driver."
-msgid ""
-"Adds SNMPv3 message authentication and encryption features to ironic "
-"``snmp`` hardware type. To enable these features, the following parameters "
-"should be used in the node's ``driver_info``:"
-msgstr ""
-"Adds SNMPv3 message authentication and encryption features to ironic "
-"``snmp`` hardware type. To enable these features, the following parameters "
-"should be used in the node's ``driver_info``:"
-
msgid "Adds ShellinaboxConsole support for virsh SSH driver."
msgstr "Adds ShellinaboxConsole support for virsh SSH driver."
@@ -1154,6 +1233,15 @@ msgstr ""
"nodes that are stuck in the rescue wait state."
msgid ""
+"Adds ``[conductor]clean_step_priority_override`` configuration parameter "
+"which allows the operator to define a custom order in which the cleaning "
+"steps are to run."
+msgstr ""
+"Adds ``[conductor]clean_step_priority_override`` configuration parameter "
+"which allows the operator to define a custom order in which the cleaning "
+"steps are to run."
+
+msgid ""
"Adds ``[swift]/endpoint_override`` option to explicitly set the endpoint URL "
"used for Swift. Ironic uses the Swift connection URL as a base for "
"generation of some TempURLs. Added parameter enables operators to fix the "
@@ -1181,8 +1269,11 @@ msgstr ""
"``instance_info`` (and ``extra`` if using metalsmith), and a lessee should "
"not be able to update all node attributes."
-msgid "Adds ``bios`` interface to the ``redfish`` hardware type."
-msgstr "Adds ``bios`` interface to the ``redfish`` hardware type."
+msgid "Adds ``bios_interface`` to the node list and node show api-ref."
+msgstr "Adds ``bios_interface`` to the node list and node show api-ref."
+
+msgid "Adds ``bios_interface`` to the node validate api-ref."
+msgstr "Adds ``bios_interface`` to the node validate api-ref."
msgid ""
"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
@@ -1192,15 +1283,6 @@ msgstr ""
"to IPA, so when connection errors occur the command will be executed again."
msgid ""
-"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
-"to IPA, so when connection errors occur the command will be executed again. "
-"The options are located in the ``[agent]`` section."
-msgstr ""
-"Adds ``command_timeout`` and ``max_command_attempts`` configuration options "
-"to IPA, so when connection errors occur the command will be executed again. "
-"The options are located in the ``[agent]`` section."
-
-msgid ""
"Adds ``driver_internal_info`` field to the node-related notification "
"``baremetal.node.provision_set.*``, new payload version 1.16."
msgstr ""
@@ -1208,28 +1290,6 @@ msgstr ""
"``baremetal.node.provision_set.*``, new payload version 1.16."
msgid ""
-"Adds ``external`` storage interface which is short for \"externally managed"
-"\". This adds logic to allow the Bare Metal service to identify when a BFV "
-"scenario is being requested based upon the configuration set for ``volume "
-"targets``."
-msgstr ""
-"Adds ``external`` storage interface which is short for \"externally managed"
-"\". This adds logic to allow the Bare Metal service to identify when a BFV "
-"scenario is being requested based upon the configuration set for ``volume "
-"targets``."
-
-msgid ""
-"Adds ``get_boot_mode``, ``set_boot_mode`` and ``get_supported_boot_modes`` "
-"methods to driver management interface. Drivers can override these methods "
-"implementing boot mode management calls to the BMC of the baremetal nodes "
-"being managed."
-msgstr ""
-"Adds ``get_boot_mode``, ``set_boot_mode`` and ``get_supported_boot_modes`` "
-"methods to driver management interface. Drivers can override these methods "
-"implementing boot mode management calls to the BMC of the baremetal nodes "
-"being managed."
-
-msgid ""
"Adds ``idrac`` hardware type support of a virtual media boot interface "
"implementation that utilizes the Redfish out-of-band (OOB) management "
"protocol and is compatible with the integrated Dell Remote Access Controller "
@@ -1308,17 +1368,6 @@ msgid ""
msgstr ""
"Adds ``rescue_interface`` field to the following node-related notifications:"
-msgid ""
-"Adds ``reset_idrac`` and ``known_good_state`` cleaning steps to hardware "
-"type ``idrac``. ``reset_idrac`` actually resets the iDRAC; "
-"``known_good_state`` also resets the iDRAC and clears the Lifecycle "
-"Controller job queue to make sure the iDRAC is in good state."
-msgstr ""
-"Adds ``reset_idrac`` and ``known_good_state`` cleaning steps to hardware "
-"type ``idrac``. ``reset_idrac`` actually resets the iDRAC; "
-"``known_good_state`` also resets the iDRAC and clears the Lifecycle "
-"Controller job queue to make sure the iDRAC is in good state."
-
msgid "Adds ``storage_interface`` field to the node-related notifications:"
msgstr "Adds ``storage_interface`` field to the node-related notifications:"
@@ -1378,27 +1427,6 @@ msgstr ""
"notifications."
msgid ""
-"Adds a ``[conductor]send_sensor_data_for_undeployed_nodes`` option to enable "
-"ironic to collect and transmit sensor data for all nodes for which sensor "
-"data collection is available. By default, this option is not enabled which "
-"aligns with the prior behavior of sensor data collection and transmission "
-"where such data was only collected if an ``instance_uuid`` was present to "
-"signify that the node has been or is being deployed. With this option set to "
-"``True``, operators may be able to identify hardware in a faulty state "
-"through the sensor data and take action before an instance workload is "
-"deployed."
-msgstr ""
-"Adds a ``[conductor]send_sensor_data_for_undeployed_nodes`` option to enable "
-"ironic to collect and transmit sensor data for all nodes for which sensor "
-"data collection is available. By default, this option is not enabled which "
-"aligns with the prior behaviour of sensor data collection and transmission "
-"where such data was only collected if an ``instance_uuid`` was present to "
-"signify that the node has been or is being deployed. With this option set to "
-"``True``, operators may be able to identify hardware in a faulty state "
-"through the sensor data and take action before an instance workload is "
-"deployed."
-
-msgid ""
"Adds a ``clear_job_queue`` cleaning step to the ``idrac-wsman`` management "
"interface. The ``clear_job_queue`` cleaning step clears the Lifecycle "
"Controller job queue including any pending jobs."
@@ -1532,38 +1560,6 @@ msgstr ""
"return tracebacks in API responses in an error condition."
msgid ""
-"Adds a configuration option ``[deploy]disk_erasure_concurrency`` to define "
-"the target pool size used by Ironic Python Agent ramdisk to erase disk "
-"devices. The number of threads created by IPA to erase disk devices is the "
-"minimum value of target pool size and the number of disks to be erased. This "
-"feature can greatly reduce the operation time for baremetals with multiple "
-"disks. For the backwards compatibility, the default value is 1."
-msgstr ""
-"Adds a configuration option ``[deploy]disk_erasure_concurrency`` to define "
-"the target pool size used by Ironic Python Agent ramdisk to erase disk "
-"devices. The number of threads created by IPA to erase disk devices is the "
-"minimum value of target pool size and the number of disks to be erased. This "
-"feature can greatly reduce the operation time for baremetals with multiple "
-"disks. For the backwards compatibility, the default value is 1."
-
-msgid ""
-"Adds a configuration option ``[ipmi]disable_boot_timeout`` which is used to "
-"set the default behavior whether ironic should send a raw IPMI command to "
-"disable timeout. This configuration option can be overidden by the per-node "
-"option ``ipmi_disable_boot_timeout`` in node's ``driver_info`` field. See "
-"`story 2004266 <https://storyboard.openstack.org/#!/story/2004266>`_ and "
-"`Story 2002977 <https://storyboard.openstack.org/#!/story/2002977>`_ for "
-"additional information."
-msgstr ""
-"Adds a configuration option ``[ipmi]disable_boot_timeout`` which is used to "
-"set the default behaviour whether ironic should send a raw IPMI command to "
-"disable timeout. This configuration option can be overridden by the per-node "
-"option ``ipmi_disable_boot_timeout`` in node's ``driver_info`` field. See "
-"`story 2004266 <https://storyboard.openstack.org/#!/story/2004266>`_ and "
-"`Story 2002977 <https://storyboard.openstack.org/#!/story/2002977>`_ for "
-"additional information."
-
-msgid ""
"Adds a configuration option ``webserver_verify_ca`` to support custom "
"certificates to validate URLs hosted on a HTTPS webserver."
msgstr ""
@@ -1663,30 +1659,6 @@ msgstr ""
"allocated from the configured port range for further use."
msgid ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. "
-"Previously, no retries were done which caused failures. This addresses `bug "
-"1756760 <https://storyboard.openstack.org/#!/story/1756760>`_."
-msgstr ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. "
-"Previously, no retries were done which caused failures. This addresses `bug "
-"1756760 <https://storyboard.openstack.org/#!/story/1756760>`_."
-
-msgid ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. Set it "
-"to 1 if you want the previous behavior, where no retries were done."
-msgstr ""
-"Adds a new configuration option ``[disk_utils]partprobe_attempts`` which "
-"defaults to 10. This is the maximum number of times to try to read a "
-"partition (if creating a config drive) via a ``partprobe`` command. Set it "
-"to 1 if you want the previous behaviour, where no retries were done."
-
-msgid ""
"Adds a new configuration option ``[drac]boot_device_job_status_timeout`` "
"that specifies the maximum amount of time (in seconds) to wait for the boot "
"device configuration job to transition to the scheduled state to allow a "
@@ -1783,45 +1755,6 @@ msgstr ""
"v1/drivers/<name>."
msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Caution should be taken due to the timeout monitoring is "
-"shifted from ``inspecting`` to ``inspect wait``, please stop all running "
-"asynchronous hardware inspection or wait until it is finished before "
-"upgrading to the Rocky release. Otherwise nodes in asynchronous inspection "
-"will be left at ``inspecting`` state forever unless the database is manually "
-"updated."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Caution should be taken due to the timeout monitoring is "
-"shifted from ``inspecting`` to ``inspect wait``, please stop all running "
-"asynchronous hardware inspection or wait until it is finished before "
-"upgrading to the Rocky release. Otherwise nodes in asynchronous inspection "
-"will be left at ``inspecting`` state forever unless the database is manually "
-"updated."
-
-msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Returning ``INSPECTING`` from the ``inspect_hardware`` method "
-"of inspect interface is deprecated, ``INSPECTWAIT`` should be returned "
-"instead."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. Returning ``INSPECTING`` from the ``inspect_hardware`` method "
-"of inspect interface is deprecated, ``INSPECTWAIT`` should be returned "
-"instead."
-
-msgid ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. The ``[conductor]inspect_timeout`` configuration option is "
-"deprecated for removal, please use ``[conductor]inspect_wait_timeout`` "
-"instead to specify the timeout of inspection process."
-msgstr ""
-"Adds an ``inspect wait`` state to handle asynchronous hardware "
-"introspection. The ``[conductor]inspect_timeout`` configuration option is "
-"deprecated for removal, please use ``[conductor]inspect_wait_timeout`` "
-"instead to specify the timeout of inspection process."
-
-msgid ""
"Adds an `agent_iboot` driver to allow use of the Iboot power driver with the "
"Agent deploy driver."
msgstr ""
@@ -1894,6 +1827,9 @@ msgstr "Current Series Release Notes"
msgid "Deprecated the 'parallel' option to periodic task decorator"
msgstr "Deprecated the 'parallel' option to periodic task decorator"
+msgid "Deprecated the bash ramdisk"
+msgstr "Deprecated the bash ramdisk"
+
msgid ""
"Drivers may optionally add a new BootInterface. This is merely a refactoring "
"of the Driver API to support future improvements."
@@ -1901,12 +1837,42 @@ msgstr ""
"Drivers may optionally add a new BootInterface. This is merely a refactoring "
"of the Driver API to support future improvements."
+msgid ""
+"Drivers using the \"agent\" deploy mechanism do not support \"rebuild --"
+"preserve-ephemeral\""
+msgstr ""
+"Drivers using the \"agent\" deploy mechanism do not support \"rebuild --"
+"preserve-ephemeral\""
+
+msgid ""
+"Fix a couple of locale issues with deployments, when running on a system "
+"using the Japanese locale"
+msgstr ""
+"Fix a couple of locale issues with deployments, when running on a system "
+"using the Japanese locale"
+
+msgid ""
+"IPMI Passwords are now obfuscated in REST API responses. This may be "
+"disabled by changing API policy settings."
+msgstr ""
+"IPMI Passwords are now obfuscated in REST API responses. This may be "
+"disabled by changing API policy settings."
+
msgid "Implemented a new Boot interface for drivers"
msgstr "Implemented a new Boot interface for drivers"
+msgid "Import Japanese translations - our first major translation addition!"
+msgstr "Import Japanese translations - our first major translation addition!"
+
msgid "Introduce new BootInterface to the Driver API"
msgstr "Introduce new BootInterface to the Driver API"
+msgid "Known issues"
+msgstr "Known issues"
+
+msgid "Liberty Series (4.0.0 - 4.2.5) Release Notes"
+msgstr "Liberty Series (4.0.0 - 4.2.5) Release Notes"
+
msgid "Migrations from Nova \"baremetal\" have been removed"
msgstr "Migrations from Nova \"baremetal\" have been removed"
@@ -1919,6 +1885,23 @@ msgstr "Newton Series (6.0.0 - 6.2.x) Release Notes"
msgid "Ocata Series (7.0.0 - 7.0.x) Release Notes"
msgstr "Ocata Series (7.0.0 - 7.0.x) Release Notes"
+msgid ""
+"Out of tree drivers may be broken by this release. The AgentDeploy and "
+"ISCSIDeploy (formerly known as PXEDeploy) classes now depend on drivers to "
+"utilize an instance of a BootInterface. For drivers that exist out of tree, "
+"that use these deploy classes, an error will be thrown during deployment. "
+"There is a simple fix. For drivers that expect these deploy classes to "
+"handle PXE booting, one can add the following code to the driver's "
+"`__init__` method::"
+msgstr ""
+"Out-of-tree drivers may be broken by this release. The AgentDeploy and "
+"ISCSIDeploy (formerly known as PXEDeploy) classes now depend on drivers to "
+"utilize an instance of a BootInterface. For drivers that exist out-of-tree, "
+"that use these deploy classes, an error will be thrown during deployment. "
+"There is a simple fix. For drivers that expect these deploy classes to "
+"handle PXE booting, one can add the following code to the driver's "
+"`__init__` method::"
+
msgid "PXE drivers now support GRUB2"
msgstr "PXE drivers now support GRUB2"
@@ -1964,6 +1947,13 @@ msgid "Support for the new ENROLL workflow during Node creation"
msgstr "Support for the new ENROLL workflow during Node creation"
msgid ""
+"The \"agent\" class of drivers now support both whole-disk and partition "
+"based images."
+msgstr ""
+"The \"agent\" class of drivers now support both whole-disk and partition "
+"based images."
+
+msgid ""
"The Ironic API now has support for CORS requests, that may be used by, for "
"example, web browser-based clients. This is configured in the [cors] section "
"of ironic.conf."
@@ -1972,6 +1962,33 @@ msgstr ""
"example, web browser-based clients. This is configured in the [cors] section "
"of ironic.conf."
+msgid "The Ironic team apologizes profusely for this inconvenience."
+msgstr "The Ironic team apologises profusely for this inconvenience."
+
+msgid ""
+"The agent must download the tenant image in full before writing it to disk. "
+"As such, the server being deployed must have enough RAM for running the "
+"agent and storing the image. This is now checked before Ironic tells the "
+"agent to deploy an image. An optional config [agent]memory_consumed_by_agent "
+"is provided. When Ironic does this check, this config option may be set to "
+"factor in the amount of RAM to reserve for running the agent."
+msgstr ""
+"The agent must download the tenant image in full before writing it to disk. "
+"As such, the server being deployed must have enough RAM for running the "
+"agent and storing the image. This is now checked before Ironic tells the "
+"agent to deploy an image. An optional config [agent]memory_consumed_by_agent "
+"is provided. When Ironic does this check, this config option may be set to "
+"factor in the amount of RAM to reserve for running the agent."
+
+msgid ""
+"This brings some bug fixes and small features on top of Ironic 4.0.0. Major "
+"changes are listed below, and full release details are available on "
+"Launchpad: https://launchpad.net/ironic/liberty/4.1.0."
+msgstr ""
+"This brings some bug fixes and small features on top of Ironic 4.0.0. Major "
+"changes are listed below, and full release details are available on "
+"Launchpad: https://launchpad.net/ironic/liberty/4.1.0."
+
msgid ""
"This change enhances the driver interface for driver authors, and should not "
"affect users of Ironic, by splitting control of booting a server from the "
@@ -1986,6 +2003,15 @@ msgstr ""
"image to a server."
msgid ""
+"This driver supports managing Cisco UCS C-series servers through the CIMC "
+"API, rather than IPMI. Documentation is available at: https://docs.openstack."
+"org/developer/ironic/drivers/cimc.html"
+msgstr ""
+"This driver supports managing Cisco UCS C-series servers through the CIMC "
+"API, rather than IPMI. Documentation is available at: https://docs.openstack."
+"org/developer/ironic/drivers/cimc.html"
+
+msgid ""
"This is the first semver-versioned release of Ironic, created during the "
"OpenStack \"Liberty\" development cycle. It marks a pivot in our versioning "
"schema from date-based versioning; the previous released version was 2015.1. "
@@ -1998,6 +2024,24 @@ msgstr ""
"2015.1. Full release details are available on Launchpad: https://launchpad."
"net/ironic/liberty/4.0.0."
+msgid ""
+"This release is a patch release on top of 4.2.0, as part of the stable "
+"Liberty series. Full details are available on Launchpad: https://launchpad."
+"net/ironic/liberty/4.2.1."
+msgstr ""
+"This release is a patch release on top of 4.2.0, as part of the stable "
+"Liberty series. Full details are available on Launchpad: https://launchpad."
+"net/ironic/liberty/4.2.1."
+
+msgid ""
+"This release is proposed as the stable Liberty release for Ironic, and "
+"brings with it some bug fixes and small features. Full release details are "
+"available on Launchpad: https://launchpad.net/ironic/liberty/4.2.0."
+msgstr ""
+"This release is proposed as the stable Liberty release for Ironic, and "
+"brings with it some bug fixes and small features. Full release details are "
+"available on Launchpad: https://launchpad.net/ironic/liberty/4.2.0."
+
msgid "Train Series (12.2.0 - 13.0.x) Release Notes"
msgstr "Train Series (12.2.0 - 13.0.x) Release Notes"
@@ -2010,11 +2054,41 @@ msgstr "Victoria Series (15.1.0 - 16.0.x) Release Notes"
msgid "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
msgstr "Wallaby Series (16.1.0 - 17.0.x) Release Notes"
+msgid ""
+"While Ironic does include a ClusteredComputeManager, which allows running "
+"more than one nova-compute process with Ironic, it should be considered "
+"experimental and has many known problems."
+msgstr ""
+"While Ironic does include a ClusteredComputeManager, which allows running "
+"more than one nova-compute process with Ironic, it should be considered "
+"experimental and has many known problems."
+
msgid "Xena Series (18.0.0 - 18.2.x) Release Notes"
msgstr "Xena Series (18.0.0 - 18.2.x) Release Notes"
-msgid "Yoga Series Release Notes"
-msgstr "Yoga Series Release Notes"
+msgid "Yoga Series (19.0.0 - 20.1.x) Release Notes"
+msgstr "Yoga Series (19.0.0 - 20.1.x) Release Notes"
+
+msgid "Zed Series (20.2.0 - 21.1.x) Release Notes"
+msgstr "Zed Series (20.2.0 - 21.1.x) Release Notes"
+
+msgid ""
+"iLO driver documentation is available at: https://docs.openstack.org/"
+"developer/ironic/drivers/ilo.html"
+msgstr ""
+"iLO driver documentation is available at: https://docs.openstack.org/"
+"developer/ironic/drivers/ilo.html"
+
+msgid ""
+"iLO virtual media drivers (iscsi_ilo and agent_ilo) can work standalone "
+"without Swift, by configuring an HTTP(S) server for hosting the deploy/boot "
+"images. A web server needs to be running on every conductor node and needs "
+"to be configured in ironic.conf."
+msgstr ""
+"iLO virtual media drivers (iscsi_ilo and agent_ilo) can work standalone "
+"without Swift, by configuring an HTTP(S) server for hosting the deploy/boot "
+"images. A web server needs to be running on every conductor node and needs "
+"to be configured in ironic.conf."
msgid "ipmitool driver supports IPMI v1.5"
msgstr "ipmitool driver supports IPMI v1.5"
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 000000000..f5f672a2d
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+==========================================
+Zed Series (20.2.0 - 21.1.x) Release Notes
+==========================================
+
+.. release-notes::
+ :branch: stable/zed
diff --git a/reno.yaml b/reno.yaml
deleted file mode 100644
index dd0aac790..000000000
--- a/reno.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# Ignore the kilo-eol tag because that branch does not work with reno
-# and contains no release notes.
-closed_branch_tag_re: "(.+)(?<!kilo)-eol"
diff --git a/requirements.txt b/requirements.txt
index 24c09f50c..2f4813baa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=3.1.1 # Apache-2.0
-SQLAlchemy>=1.2.19 # MIT
+SQLAlchemy>=1.4.0 # MIT
alembic>=1.4.2 # MIT
automaton>=1.9.0 # Apache-2.0
eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
@@ -14,7 +14,7 @@ WebOb>=1.7.1 # MIT
python-cinderclient!=4.0.0,>=3.3.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
keystoneauth1>=4.2.0 # Apache-2.0
-ironic-lib>=4.6.1 # Apache-2.0
+ironic-lib>=5.4.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
pytz>=2013.6 # MIT
stevedore>=1.29.0 # Apache-2.0
@@ -39,7 +39,7 @@ rfc3986>=1.2.0 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
Jinja2>=3.0.0 # BSD License (3 clause)
keystonemiddleware>=9.5.0 # Apache-2.0
-oslo.messaging>=5.29.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
tenacity>=6.2.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
jsonschema>=3.2.0 # MIT
@@ -47,4 +47,4 @@ psutil>=3.2.2 # BSD
futurist>=1.2.0 # Apache-2.0
tooz>=2.7.0 # Apache-2.0
openstacksdk>=0.48.0 # Apache-2.0
-sushy>=3.10.0
+sushy>=4.3.0
diff --git a/setup.cfg b/setup.cfg
index 9b4366a84..915d50ccc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -52,6 +52,7 @@ wsgi_scripts =
ironic-api-wsgi = ironic.api.wsgi:initialize_wsgi_app
ironic.dhcp =
+ dnsmasq = ironic.dhcp.dnsmasq:DnsmasqDHCPApi
neutron = ironic.dhcp.neutron:NeutronDHCPApi
none = ironic.dhcp.none:NoneDHCPApi
@@ -167,6 +168,7 @@ ironic.hardware.interfaces.vendor =
idrac-wsman = ironic.drivers.modules.drac.vendor_passthru:DracWSManVendorPassthru
idrac-redfish = ironic.drivers.modules.drac.vendor_passthru:DracRedfishVendorPassthru
ilo = ironic.drivers.modules.ilo.vendor:VendorPassthru
+ irmc = ironic.drivers.modules.irmc.vendor:IRMCVendorPassthru
ipmitool = ironic.drivers.modules.ipmitool:VendorPassthru
no-vendor = ironic.drivers.modules.noop:NoVendor
redfish = ironic.drivers.modules.redfish.vendor:RedfishVendorPassthru
diff --git a/setup.py b/setup.py
index f63cc23c5..e5bafb013 100644
--- a/setup.py
+++ b/setup.py
@@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
- pbr=True)
+ pbr=True,
+)
diff --git a/test-requirements.txt b/test-requirements.txt
index bd29d9394..0c4bdb0ca 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,7 +11,7 @@ oslo.reports>=1.18.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
stestr>=2.0.0 # Apache-2.0
psycopg2>=2.8.5 # LGPL/ZPL
-testtools>=2.2.0 # MIT
+testtools>=2.5.0 # MIT
WebTest>=2.0.27 # MIT
pysnmp>=4.4.12
bandit!=1.6.0,>=1.1.0,<2.0.0 # Apache-2.0
diff --git a/tools/benchmark/do_not_run_create_benchmark_data.py b/tools/benchmark/do_not_run_create_benchmark_data.py
index afdb3c7f2..d738e1285 100644
--- a/tools/benchmark/do_not_run_create_benchmark_data.py
+++ b/tools/benchmark/do_not_run_create_benchmark_data.py
@@ -10,7 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import random
import sys
import time
@@ -20,31 +20,54 @@ from sqlalchemy import sql
from ironic.common import service
from ironic.conf import CONF # noqa To Load Configuration
from ironic.objects import node
+from ironic.objects import port
+
+
+NODE_COUNT = 10000
+PORTS_PER_NODE = 2
+
+
+# NOTE(hjensas): Mostly copy-paste from Nova
+def generate_mac_address():
+ """Generate an Ethernet MAC address."""
+ mac = [random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+def _create_test_node_ports(new_node):
+ for i in range(0, PORTS_PER_NODE):
+ new_port = port.Port()
+ new_port.node_id = new_node.id
+ new_port.address = generate_mac_address()
+ new_port.pxe_enabled = True
+ new_port.create()
def _create_test_nodes():
print("Starting creation of fake nodes.")
start = time.time()
- node_count = 10000
checkin = time.time()
- for i in range(0, node_count):
-
- new_node = node.Node({
- 'power_state': 'power off',
- 'driver': 'ipmi',
- 'driver_internal_info': {'test-meow': i},
- 'name': 'BenchmarkTestNode-%s' % i,
- 'driver_info': {
- 'ipmi_username': 'admin',
- 'ipmi_password': 'admin',
- 'ipmi_address': 'testhost%s.env.top.level.domain' % i},
- 'resource_class': 'CUSTOM_BAREMETAL',
- 'properties': {
- 'cpu': 4,
- 'memory': 32,
- 'cats': i,
- 'meowing': True}})
+ for i in range(0, NODE_COUNT):
+ new_node = node.Node()
+ new_node.power_state = 'power off'
+ new_node.driver = 'ipmi'
+ new_node.driver_internal_info = {'test-meow': i}
+ new_node.name = 'BenchmarkTestNode-%s' % i
+ new_node.driver_info = {
+ 'ipmi_username': 'admin', 'ipmi_password': 'admin',
+ 'ipmi_address': 'testhost%s.env.top.level.domain' % i}
+ new_node.resource_class = 'CUSTOM_BAREMETAL'
+ new_node.properties = {'cpu': 4,
+ 'memory': 32,
+ 'cats': i,
+ 'meowing': True}
new_node.create()
+ _create_test_node_ports(new_node)
delta = time.time() - checkin
if delta > 10:
checkin = time.time()
@@ -52,7 +75,7 @@ def _create_test_nodes():
% (i, delta, time.time() - start))
created = time.time()
elapse = created - start
- print('Created %s nodes in %s seconds.\n' % (node_count, elapse))
+ print('Created %s nodes in %s seconds.\n' % (NODE_COUNT, elapse))
def _mix_up_nodes_data():
diff --git a/tools/benchmark/generate-statistics.py b/tools/benchmark/generate-statistics.py
index 740c3be08..e8327f3ac 100644
--- a/tools/benchmark/generate-statistics.py
+++ b/tools/benchmark/generate-statistics.py
@@ -21,6 +21,7 @@ import oslo_policy
from oslo_utils import timeutils
from ironic.api.controllers.v1 import node as node_api
+from ironic.api.controllers.v1 import port as port_api
from ironic.api.controllers.v1 import utils as api_utils
from ironic.common import context
from ironic.common import service
@@ -28,6 +29,7 @@ from ironic.conf import CONF # noqa To Load Configuration
from ironic.db import api as db_api
from ironic.objects import conductor
from ironic.objects import node
+from ironic.objects import port
def _calculate_delta(start, finish):
@@ -56,6 +58,24 @@ def _assess_db_performance():
return node_count
+def _assess_db_performance_ports():
+ start = time.time()
+ dbapi = db_api.get_instance()
+ print('Phase - Assess DB performance - Ports')
+ _add_a_line()
+ got_connection = time.time()
+ ports = dbapi.get_port_list()
+ port_count = len(ports)
+ query_complete = time.time()
+ delta = _calculate_delta(start, got_connection)
+ print('Obtained DB client in %s seconds.' % delta)
+ delta = _calculate_delta(got_connection, query_complete)
+ print('Returned %s ports in python %s seconds from the DB.\n' %
+ (port_count, delta))
+    # return port count for future use.
+ return port_count
+
+
def _assess_db_and_object_performance():
print('Phase - Assess DB & Object conversion Performance')
_add_a_line()
@@ -88,6 +108,33 @@ def _assess_db_and_object_performance():
observed_vendors.append(vendor)
+def _assess_db_and_object_performance_ports():
+ print('Phase - Assess DB & Object conversion Performance - Ports')
+ _add_a_line()
+ start = time.time()
+ port_list = port.Port().list(context.get_admin_context())
+ got_list = time.time()
+ delta = _calculate_delta(start, got_list)
+ print('Obtained list of port objects in %s seconds.' % delta)
+ count = 0
+ tbl_size = 0
+ # In a sense, this helps provide a relative understanding if the
+ # database is the bottleneck, or the objects post conversion.
+ # converting completely to json and then measuring the size helps
+ # ensure that everything is "assessed" while not revealing too
+ # much detail.
+ for port_obj in port_list:
+ # Just looping through the entire set to count should be
+ # enough to ensure that the entry is loaded from the db
+ # and then converted to an object.
+ tbl_size = tbl_size + sys.getsizeof(port_obj.as_dict())
+ count = count + 1
+ delta = _calculate_delta(got_list, time.time())
+ print('Took %s seconds to iterate through %s port objects.' %
+ (delta, count))
+ print('Ports table is roughly %s bytes of JSON.\n' % tbl_size)
+
+
@mock.patch('ironic.api.request') # noqa patch needed for the object model
@mock.patch.object(metrics_utils, 'get_metrics_logger', lambda *_: mock.Mock)
@mock.patch.object(api_utils, 'check_list_policy', lambda *_: None)
@@ -155,6 +202,68 @@ def _assess_db_object_and_api_performance(mock_log, mock_request):
'nodes API call pattern.\n' % (delta, total_nodes))
+
+@mock.patch('ironic.api.request') # noqa patch needed for the object model
+@mock.patch.object(metrics_utils, 'get_metrics_logger', lambda *_: mock.Mock)
+@mock.patch.object(api_utils, 'check_list_policy', lambda *_: None)
+@mock.patch.object(api_utils, 'check_allow_specify_fields', lambda *_: None)
+@mock.patch.object(api_utils, 'check_allowed_fields', lambda *_: None)
+@mock.patch.object(oslo_policy.policy, 'LOG', autospec=True)
+def _assess_db_object_and_api_performance_ports(mock_log, mock_request):
+ print('Phase - Assess DB & Object conversion Performance - Ports')
+ _add_a_line()
+ # Just mock it to silence it since getting the logger to update
+ # config seems like not a thing once started. :\
+ mock_log.debug = mock.Mock()
+    # Internal logic requires major/minor versions and a context to
+    # proceed. This is just to make the PortsController respond properly.
+ mock_request.context = context.get_admin_context()
+ mock_request.version.major = 1
+ mock_request.version.minor = 71
+
+ start = time.time()
+ port_api_controller = port_api.PortsController()
+ port_api_controller.context = context.get_admin_context()
+ fields = ("uuid,node_uuid,address,extra,local_link_connection,"
+ "pxe_enabled,internal_info,physical_network,"
+ "is_smartnic")
+
+ total_ports = 0
+
+ res = port_api_controller._get_ports_collection(
+ resource_url='ports',
+ node_ident=None,
+ address=None,
+ portgroup_ident=None,
+ marker=None,
+ limit=None,
+ sort_key="id",
+ sort_dir="asc",
+ fields=fields.split(','))
+ total_ports = len(res['ports'])
+ while len(res['ports']) != 1:
+ print(" ** Getting ports ** %s Elapsed: %s seconds." %
+ (total_ports, _calculate_delta(start, time.time())))
+ res = port_api_controller._get_ports_collection(
+ resource_url='ports',
+ node_ident=None,
+ address=None,
+ portgroup_ident=None,
+ marker=res['ports'][-1]['uuid'],
+ limit=None,
+ sort_key="id",
+ sort_dir="asc",
+ fields=fields.split(','))
+ new_ports = len(res['ports'])
+ if new_ports == 0:
+ break
+ total_ports = total_ports + new_ports
+
+ delta = _calculate_delta(start, time.time())
+ print('Took %s seconds to return all %s ports via '
+ 'ports API call pattern.\n' % (delta, total_ports))
+
+
def _report_conductors():
print('Phase - identifying conductors/drivers')
_add_a_line()
@@ -190,6 +299,9 @@ def main():
_assess_db_performance()
_assess_db_and_object_performance()
_assess_db_object_and_api_performance()
+ _assess_db_performance_ports()
+ _assess_db_and_object_performance_ports()
+ _assess_db_object_and_api_performance_ports()
_report_conductors()
diff --git a/tox.ini b/tox.ini
index cffaa8f1d..97ea9f707 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,5 @@
[tox]
minversion = 3.18.0
-skipsdist = True
envlist = py3,pep8
ignore_basepython_conflict=true
@@ -8,17 +7,23 @@ ignore_basepython_conflict=true
usedevelop = True
basepython = python3
setenv = VIRTUAL_ENV={envdir}
- PYTHONDONTWRITEBYTECODE = 1
+ PYTHONDONTWRITEBYTECODE=1
LANGUAGE=en_US
LC_ALL=en_US.UTF-8
- PYTHONWARNINGS=default::DeprecationWarning
+ PYTHONUNBUFFERED=1
+ SQLALCHEMY_WARN_20=true
deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
stestr run --slowest {posargs}
-passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+passenv = http_proxy
+ HTTP_PROXY
+ https_proxy
+ HTTPS_PROXY
+ no_proxy
+ NO_PROXY
[testenv:unit-with-driver-libs]
deps = {[testenv]deps}
@@ -39,6 +44,8 @@ deps=
Pygments>=2.2.0 # BSD
bashate>=0.5.1 # Apache-2.0
allowlist_externals = bash
+ {toxinidir}/tools/run_bashate.sh
+ {toxinidir}/tools/check-releasenotes.py
commands =
bash tools/flake8wrap.sh {posargs}
# Run bashate during pep8 runs to ensure violations are caught by
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index c9b969d4f..ca1757417 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -85,7 +85,6 @@
q-dhcp: true
q-l3: true
q-meta: true
- q-metering: true
q-svc: true
ovn-controller: false
ovn-northd: false
@@ -217,6 +216,48 @@
s-proxy: False
- job:
+ name: ironic-standalone-anaconda
+ parent: ironic-standalone-redfish
+ description:
+ Test ironic with the anaconda deployment interface.
+ Test also uses Redfish.
+ required-projects:
+ - opendev.org/openstack/sushy-tools
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^install-guide/.*$
+ - ^ironic/locale/.*$
+ - ^ironic/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^test-requirements.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ vars:
+ tempest_test_regex: BaremetalRedfishIPxeAnacondaNoGlance
+ tempest_test_timeout: 4800
+ tempest_concurrency: 2
+ devstack_localrc:
+ IRONIC_ENABLED_DEPLOY_INTERFACES: "anaconda"
+ IRONIC_VM_COUNT: 2
+ IRONIC_VM_VOLUME_COUNT: 1
+ IRONIC_VM_SPECS_RAM: 3192
+ IRONIC_VM_SPECS_CPU: 3
+ IRONIC_ENFORCE_SCOPE: True
+ # We're using a lot of disk space in this job. Some testing nodes have
+ # a small root partition, so use /opt which is mounted from a bigger
+ # ephemeral partition on such nodes
+ LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
+ IRONIC_ANACONDA_IMAGE_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/
+ IRONIC_ANACONDA_KERNEL_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/vmlinuz
+ IRONIC_ANACONDA_RAMDISK_REF: https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/images/pxeboot/initrd.img
+ IRONIC_ANACONDA_INSECURE_HEARTBEAT: True
+ IRONIC_DEPLOY_CALLBACK_WAIT_TIMEOUT: 3600
+ IRONIC_PXE_BOOT_RETRY_TIMEOUT: 3600
+
+- job:
name: ironic-tempest-bios-redfish-pxe
description: "Deploy ironic node over PXE using BIOS boot mode"
parent: ironic-tempest-uefi-redfish-vmedia
@@ -258,6 +299,9 @@
# result and makes this job VERY sensitive to heavy disk IO of the
# underlying hypervisor/cloud.
IRONIC_CALLBACK_TIMEOUT: 800
+ IRONIC_GRUB2_SHIM_FILE: https://mirror.iad3.inmotion.opendev.org/centos-stream/9-stream/BaseOS/x86_64/os/EFI/BOOT/BOOTX64.EFI
+ IRONIC_GRUB2_FILE: https://mirror.iad3.inmotion.opendev.org/centos-stream/9-stream/BaseOS/x86_64/os/EFI/BOOT/grubx64.efi
+ IRONIC_GRUB2_CONFIG_PATH: EFI/BOOT/grub.cfg
devstack_services:
s-account: True
s-container: True
@@ -660,7 +704,6 @@
IRONIC_IPXE_ENABLED: False
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_AUTOMATED_CLEAN_ENABLED: False
- IRONIC_VM_SPECS_RAM: 4096
- job:
# Security testing for known issues
@@ -848,6 +891,7 @@
IRONIC_AUTOMATED_CLEAN_ENABLED: False
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
EBTABLES_RACE_FIX: True
@@ -868,6 +912,18 @@
cinder: False
ir-api: True
ir-cond: True
+ # Neutron services
+ # In the Ironic grenade job we want to explicitly enable ML2/OVS agents
+ # and disable OVN
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-svc: true
+ q-metering: true
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
tempest_plugins:
- ironic-tempest-plugin
tempest_test_regex: ironic_tempest_plugin.tests.scenario
@@ -1030,6 +1086,7 @@
- job:
name: ironic-cross-sushy
+ nodeset: ubuntu-jammy
description: Ironic unit tests run with Sushy from source
parent: openstack-tox
required-projects:
@@ -1044,10 +1101,11 @@
- ^tools/.*$
vars:
# NOTE(dtantsur): change this every release cycle if needed.
- bindep_profile: test py38
- tox_envlist: py38
+ bindep_profile: test py310
+ tox_envlist: py310
# This variable ensures that sushy is installed from source.
tox_install_siblings: true
# NOTE(dtantsur): this job will be run on sushy as well, so it's
# important to set the working dir to the Ironic checkout.
zuul_work_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/ironic'].src_dir }}"
+
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 8b821f816..0f7ff75e1 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -2,8 +2,8 @@
templates:
- check-requirements
- openstack-cover-jobs
- - openstack-python3-zed-jobs
- - openstack-python3-zed-jobs-arm64
+ - openstack-python3-jobs
+ - openstack-python3-jobs-arm64
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
@@ -45,6 +45,8 @@
voting: false
- ironic-tempest-ipxe-ipv6:
voting: false
+ - ironic-standalone-anaconda:
+ voting: false
- ironic-inspector-tempest-rbac-scope-enforced:
voting: false
- bifrost-integration-tinyipa-ubuntu-focal: