-rw-r--r--  bindep.txt | 2
-rw-r--r--  devstack/files/debs/ironic | 3
-rw-r--r--  devstack/lib/ironic | 27
-rw-r--r--  doc/requirements.txt | 1
-rw-r--r--  doc/source/admin/agent-power.rst | 76
-rw-r--r--  doc/source/admin/agent-token.rst | 2
-rw-r--r--  doc/source/admin/boot-from-volume.rst | 2
-rw-r--r--  doc/source/admin/drivers/redfish.rst | 23
-rw-r--r--  doc/source/admin/fast-track.rst | 50
-rw-r--r--  doc/source/admin/index.rst | 21
-rw-r--r--  doc/source/admin/inspection.rst | 2
-rw-r--r--  doc/source/admin/node-deployment.rst | 52
-rw-r--r--  doc/source/admin/raid.rst | 12
-rw-r--r--  doc/source/admin/troubleshooting.rst | 77
-rw-r--r--  doc/source/conf.py | 2
-rw-r--r--  doc/source/contributor/releasing.rst | 91
-rw-r--r--  doc/source/install/configure-pxe.rst | 59
-rw-r--r--  doc/source/install/enrollment.rst | 7
-rw-r--r--  ironic/api/args.py | 381
-rw-r--r--  ironic/api/controllers/link.py | 39
-rw-r--r--  ironic/api/controllers/root.py | 51
-rw-r--r--  ironic/api/controllers/v1/__init__.py | 192
-rw-r--r--  ironic/api/controllers/v1/allocation.py | 8
-rw-r--r--  ironic/api/controllers/v1/bios.py | 12
-rw-r--r--  ironic/api/controllers/v1/chassis.py | 36
-rw-r--r--  ironic/api/controllers/v1/collection.py | 4
-rw-r--r--  ironic/api/controllers/v1/conductor.py | 12
-rw-r--r--  ironic/api/controllers/v1/deploy_template.py | 12
-rw-r--r--  ironic/api/controllers/v1/driver.py | 32
-rw-r--r--  ironic/api/controllers/v1/node.py | 66
-rw-r--r--  ironic/api/controllers/v1/port.py | 12
-rw-r--r--  ironic/api/controllers/v1/portgroup.py | 22
-rw-r--r--  ironic/api/controllers/v1/state.py | 3
-rw-r--r--  ironic/api/controllers/v1/utils.py | 14
-rw-r--r--  ironic/api/controllers/v1/volume.py | 25
-rw-r--r--  ironic/api/controllers/v1/volume_connector.py | 16
-rw-r--r--  ironic/api/controllers/v1/volume_target.py | 16
-rw-r--r--  ironic/api/controllers/version.py | 57
-rw-r--r--  ironic/api/expose.py | 20
-rw-r--r--  ironic/api/method.py | 95
-rw-r--r--  ironic/api/types.py | 744
-rw-r--r--  ironic/common/exception.py | 90
-rw-r--r--  ironic/common/images.py | 30
-rw-r--r--  ironic/common/json_rpc/server.py | 4
-rw-r--r--  ironic/common/neutron.py | 159
-rw-r--r--  ironic/common/pxe_utils.py | 29
-rw-r--r--  ironic/common/utils.py | 12
-rw-r--r--  ironic/conductor/deployments.py | 9
-rw-r--r--  ironic/conductor/manager.py | 24
-rw-r--r--  ironic/conductor/rpcapi.py | 1
-rw-r--r--  ironic/conductor/task_manager.py | 17
-rw-r--r--  ironic/conductor/utils.py | 47
-rw-r--r--  ironic/conf/deploy.py | 11
-rw-r--r--  ironic/conf/pxe.py | 23
-rw-r--r--  ironic/dhcp/neutron.py | 19
-rw-r--r--  ironic/drivers/base.py | 12
-rw-r--r--  ironic/drivers/drac.py | 8
-rw-r--r--  ironic/drivers/generic.py | 3
-rw-r--r--  ironic/drivers/ilo.py | 8
-rw-r--r--  ironic/drivers/modules/agent.py | 127
-rw-r--r--  ironic/drivers/modules/agent_base.py | 152
-rw-r--r--  ironic/drivers/modules/agent_client.py | 83
-rw-r--r--  ironic/drivers/modules/agent_power.py | 220
-rw-r--r--  ironic/drivers/modules/ansible/deploy.py | 58
-rw-r--r--  ironic/drivers/modules/console_utils.py | 4
-rw-r--r--  ironic/drivers/modules/deploy_utils.py | 68
-rw-r--r--  ironic/drivers/modules/drac/raid.py | 3
-rw-r--r--  ironic/drivers/modules/ilo/common.py | 27
-rw-r--r--  ironic/drivers/modules/ilo/management.py | 54
-rw-r--r--  ironic/drivers/modules/ipmitool.py | 46
-rw-r--r--  ironic/drivers/modules/iscsi_deploy.py | 57
-rw-r--r--  ironic/drivers/modules/network/common.py | 50
-rw-r--r--  ironic/drivers/modules/pxe_base.py | 5
-rw-r--r--  ironic/drivers/modules/redfish/boot.py | 49
-rw-r--r--  ironic/drivers/modules/redfish/management.py | 2
-rw-r--r--  ironic/objects/fields.py | 7
-rw-r--r--  ironic/objects/node.py | 4
-rw-r--r--  ironic/tests/base.py | 2
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_deploy_template.py | 28
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_expose.py | 4
-rw-r--r--  ironic/tests/unit/api/controllers/v1/test_utils.py | 28
-rw-r--r--  ironic/tests/unit/api/test_args.py | 506
-rw-r--r--  ironic/tests/unit/api/test_hooks.py | 2
-rw-r--r--  ironic/tests/unit/api/test_types.py | 566
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_network_show.json | 33
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json | 33
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_port_show.json | 59
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json | 59
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_subnet_show.json | 32
-rw-r--r--  ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json | 32
-rw-r--r--  ironic/tests/unit/common/test_images.py | 21
-rw-r--r--  ironic/tests/unit/common/test_json_rpc.py | 2
-rw-r--r--  ironic/tests/unit/common/test_neutron.py | 123
-rw-r--r--  ironic/tests/unit/common/test_pxe_utils.py | 26
-rw-r--r--  ironic/tests/unit/conductor/test_deployments.py | 34
-rw-r--r--  ironic/tests/unit/conductor/test_manager.py | 149
-rw-r--r--  ironic/tests/unit/conductor/test_task_manager.py | 18
-rw-r--r--  ironic/tests/unit/conductor/test_utils.py | 35
-rw-r--r--  ironic/tests/unit/drivers/modules/ansible/test_deploy.py | 135
-rw-r--r--  ironic/tests/unit/drivers/modules/ilo/test_common.py | 23
-rw-r--r--  ironic/tests/unit/drivers/modules/ilo/test_management.py | 85
-rw-r--r--  ironic/tests/unit/drivers/modules/network/test_common.py | 8
-rw-r--r--  ironic/tests/unit/drivers/modules/network/test_flat.py | 5
-rw-r--r--  ironic/tests/unit/drivers/modules/network/test_neutron.py | 9
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_boot.py | 200
-rw-r--r--  ironic/tests/unit/drivers/modules/redfish/test_management.py | 31
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent.py | 471
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent_base.py | 482
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent_client.py | 34
-rw-r--r--  ironic/tests/unit/drivers/modules/test_agent_power.py | 127
-rw-r--r--  ironic/tests/unit/drivers/modules/test_console_utils.py | 4
-rw-r--r--  ironic/tests/unit/drivers/modules/test_deploy_utils.py | 64
-rw-r--r--  ironic/tests/unit/drivers/modules/test_ipmitool.py | 37
-rw-r--r--  ironic/tests/unit/drivers/modules/test_ipxe.py | 15
-rw-r--r--  ironic/tests/unit/drivers/modules/test_iscsi_deploy.py | 202
-rw-r--r--  ironic/tests/unit/drivers/test_drac.py | 17
-rw-r--r--  ironic/tests/unit/drivers/test_ilo.py | 25
-rw-r--r--  ironic/tests/unit/objects/test_fields.py | 9
-rw-r--r--  ironic/tests/unit/objects/test_node.py | 2
-rw-r--r--  lower-constraints.txt | 2
-rw-r--r--  releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml | 8
-rw-r--r--  releasenotes/notes/agent-power-a000fdf37cb870e4.yaml | 6
-rw-r--r--  releasenotes/notes/agent-raid-647acfd599e83476.yaml | 5
-rw-r--r--  releasenotes/notes/destroy-broken-8b13de8382199aca.yaml | 5
-rw-r--r--  releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml | 17
-rw-r--r--  releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml | 5
-rw-r--r--  releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml | 17
-rw-r--r--  releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml | 5
-rw-r--r--  releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml | 23
-rw-r--r--  releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml | 12
-rw-r--r--  releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml | 6
-rw-r--r--  releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml | 28
-rw-r--r--  releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml | 5
-rw-r--r--  releasenotes/notes/no-power-on-842b21d55b07a632.yaml | 9
-rw-r--r--  releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml | 7
-rw-r--r--  releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml | 5
-rw-r--r--  requirements.txt | 3
-rw-r--r--  setup.cfg | 1
-rw-r--r--  tools/bandit.yml | 1
-rw-r--r--  tox.ini | 3
-rw-r--r--  zuul.d/ironic-jobs.yaml | 103
-rw-r--r--  zuul.d/project.yaml | 8
142 files changed, 6115 insertions, 1846 deletions
diff --git a/bindep.txt b/bindep.txt
index 80df57a52..36b4ce35d 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -35,7 +35,7 @@ qemu [platform:dpkg devstack build-image-dib]
qemu-kvm [platform:dpkg devstack]
qemu-utils [platform:dpkg devstack build-image-dib]
qemu-system-data [platform:dpkg devstack]
-sgabios [platform: rpm devstack]
+sgabios [platform:rpm devstack]
ipxe-qemu [platform:dpkg devstack]
edk2-ovmf [platform:rpm devstack]
ovmf [platform:dpkg devstack]
diff --git a/devstack/files/debs/ironic b/devstack/files/debs/ironic
index 375f34439..49b0689d7 100644
--- a/devstack/files/debs/ironic
+++ b/devstack/files/debs/ironic
@@ -20,9 +20,6 @@ jq
libguestfs-tools
libguestfs0
libvirt-bin # dist:bionic
-libvirt-daemon-driver-storage-gluster # dist:focal
-libvirt-daemon-driver-lxc # dist:focal
-libvirt-daemon-driver-storage-rbd # dist:focal
libvirt-daemon-system # dist:focal
libvirt-dev
open-iscsi
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index a4d5fc07c..40cbd6b26 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -164,6 +164,24 @@ if [[ -n "$BUILD_TIMEOUT" ]]; then
echo "WARNING: BUILD_TIMEOUT variable is renamed to IRONIC_TEMPEST_BUILD_TIMEOUT and will be deprecated in Pike."
fi
+hostdomain=$(hostname)
+if [[ "$hostdomain" =~ "rax" ]]; then
+ echo "WARNING: Auto-increasing the requested build timeout by 1.5 as the detected hostname suggests a cloud host where VMs are software emulated."
+ # NOTE(TheJulia): Rax hosts are entirely qemu emulated, not CPU enabled
+ # virtualization. As such, the ramdisk decompression is known to take an
+ # exceptional amount of time and we need to afford a little more time to
+ # these hosts for jobs to complete without issues.
+ new_timeout=$(echo "$IRONIC_TEMPEST_BUILD_TIMEOUT * 1.5 / 1" | bc)
+ IRONIC_TEMPEST_BUILD_TIMEOUT=$new_timeout
+
+ if [ -n "$IRONIC_PXE_BOOT_RETRY_TIMEOUT" ]; then
+ new_timeout=$(echo "$IRONIC_PXE_BOOT_RETRY_TIMEOUT * 1.5 / 1" | bc)
+ IRONIC_PXE_BOOT_RETRY_TIMEOUT=$new_timeout
+ fi
+ # TODO(TheJulia): If we have to do magically extend timeouts again,
+ # we should make a helper method...
+fi
+
IRONIC_DEFAULT_API_VERSION=${IRONIC_DEFAULT_API_VERSION:-}
IRONIC_CMD="openstack baremetal"
if [[ -n "$IRONIC_DEFAULT_API_VERSION" ]]; then
@@ -1691,10 +1709,8 @@ function configure_ironic_conductor {
local pxebin
pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
uefipxebin=`basename $(get_uefi_ipxe_boot_file)`
- iniset $IRONIC_CONF_FILE pxe pxe_config_template '$pybasedir/drivers/modules/ipxe_config.template'
- iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
- iniset $IRONIC_CONF_FILE pxe uefi_pxe_config_template '$pybasedir/drivers/modules/ipxe_config.template'
- iniset $IRONIC_CONF_FILE pxe uefi_pxe_bootfile_name $uefipxebin
+ iniset $IRONIC_CONF_FILE pxe ipxe_bootfile_name $pxebin
+ iniset $IRONIC_CONF_FILE pxe uefi_ipxe_bootfile_name $uefipxebin
iniset $IRONIC_CONF_FILE deploy http_root $IRONIC_HTTP_DIR
iniset $IRONIC_CONF_FILE deploy http_url "http://$([[ $IRONIC_HTTP_SERVER =~ : ]] && echo "[$IRONIC_HTTP_SERVER]" || echo $IRONIC_HTTP_SERVER):$IRONIC_HTTP_PORT"
if [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then
@@ -2958,6 +2974,9 @@ function ironic_configure_tempest {
if [[ -n "$IRONIC_IP_VERSION" ]]; then
iniset $TEMPEST_CONFIG validation ip_version_for_ssh $IRONIC_IP_VERSION
fi
+ if [[ -n "$IRONIC_BOOT_MODE" ]]; then
+ iniset $TEMPEST_CONFIG baremetal boot_mode $IRONIC_BOOT_MODE
+ fi
if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
# No FIPs in V6 and we dynamically create networks...
# network_for_ssh is defaulted to public
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 702fb23bc..d7394a9e6 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -3,6 +3,5 @@ os-api-ref>=1.4.0 # Apache-2.0
reno>=3.1.0 # Apache-2.0
sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-apidoc>=0.2.0 # BSD
-sphinxcontrib-pecanwsme>=0.10.0 # Apache-2.0
sphinxcontrib-seqdiag>=0.8.4 # BSD
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/admin/agent-power.rst b/doc/source/admin/agent-power.rst
new file mode 100644
index 000000000..b948733ee
--- /dev/null
+++ b/doc/source/admin/agent-power.rst
@@ -0,0 +1,76 @@
+=================================
+Deploying without BMC Credentials
+=================================
+
+The Bare Metal service usually requires BMC credentials for all provisioning
+operations. Starting with the Victoria release series there is limited support
+for inspection, cleaning and deployments without the credentials.
+
+.. warning::
+ This feature is experimental and only works in a limited scenario. When
+ using it, you have to be prepared to provide BMC credentials in case of
+ a failure or any unsupported actions.
+
+How it works
+============
+
+The expected workflow is as follows:
+
+#. The node is discovered by manually powering it on and gets the
+ ``manual-management`` hardware type and ``agent`` power interface.
+
+ If discovery is not used, a node can be enrolled through the API and then
+ powered on manually.
+
+#. The operator moves the node to ``manageable``. This works because the
+ ``agent`` power interface only requires the ability to connect to the agent.
+
+#. The operator moves the node to `available`. Cleaning happens normally via
+ the already running agent. If reboot is needed, it is done by telling the
+ agent to reboot the node in-band.
+
+#. A user deploys the node. Deployment happens normally via the already
+ running agent.
+
+#. At the end of the deployment, the node is rebooted via the reboot command
+ instead of power off+on.
+
+Enabling
+========
+
+:doc:`fast-track` is a requirement for this feature to work. After enabling it,
+add the ``agent`` power interface and the ``manual-management`` hardware type
+to the lists of enabled interfaces and hardware types:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ enabled_hardware_types = manual-management
+ enabled_management_interfaces = noop
+ enabled_power_interfaces = agent
+
+ [deploy]
+ fast_track = true
+
+As usual with the ``noop`` management interface, enable the network boot
+fallback:
+
+.. code-block:: ini
+
+ [pxe]
+ enable_netboot_fallback = true
+
+If using discovery, :ironic-inspector-doc:`configure discovery in
+ironic-inspector <user/usage.html#discovery>` with the default driver set
+to ``manual-management``.
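+
+If enrolling manually, a node using this feature could be created along these
+lines (the node name here is illustrative):
+
+.. code-block:: bash
+
+ openstack baremetal node create \
+ --name manual-node-0 \
+ --driver manual-management \
+ --power-interface agent \
+ --management-interface noop \
+ --network-interface noop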
+
+Limitations
+===========
+
+* Only the ``noop`` network interface is supported.
+
+* Undeploy and rescue are not supported; you need to add BMC credentials first.
+
+* If any error happens in the process, recovery will likely require BMC
+ credentials.
+
+* Only rebooting is possible through the API; power on/off commands will fail.
diff --git a/doc/source/admin/agent-token.rst b/doc/source/admin/agent-token.rst
index 90528bc38..4c2fd0e34 100644
--- a/doc/source/admin/agent-token.rst
+++ b/doc/source/admin/agent-token.rst
@@ -43,7 +43,7 @@ It remains available to the conductors, and is stored in memory of the
With the token is available in memory in the agent, the token is embedded with
``heartbeat`` operations to the ironic API endpoint. This enables the API to
authenticate the heartbeat request, and refuse "heartbeat" requests from the
-``ironic-python-agent``. With the ``Ussuri`` release, the confiuration option
+``ironic-python-agent``. With the ``Ussuri`` release, the configuration option
``[DEFAULT]require_agent_token`` can be set ``True`` to explicitly require
token use.
diff --git a/doc/source/admin/boot-from-volume.rst b/doc/source/admin/boot-from-volume.rst
index a33888fcd..fd89360a6 100644
--- a/doc/source/admin/boot-from-volume.rst
+++ b/doc/source/admin/boot-from-volume.rst
@@ -177,7 +177,7 @@ to a remote boot from volume target, so that also must be ensured by
the user in advance.
Records of volume targets are removed upon the node being undeployed,
-and as such are not presistent across deployments.
+and as such are not persistent across deployments.
Cinder Multi-attach
-------------------
diff --git a/doc/source/admin/drivers/redfish.rst b/doc/source/admin/drivers/redfish.rst
index 2e5a50a2b..f784740dc 100644
--- a/doc/source/admin/drivers/redfish.rst
+++ b/doc/source/admin/drivers/redfish.rst
@@ -185,6 +185,29 @@ property can be used to pass user-specified kernel command line parameters.
For ramdisk kernel, ``[instance_info]/kernel_append_params`` property serves
the same purpose.
+Virtual Media Ramdisk
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``ramdisk`` deploy interface can be used in concert with the
+``redfish-virtual-media`` boot interface to facilitate the boot of a remote
+node utilizing pre-supplied virtual media.
+
+Instead of supplying an ``[instance_info]/image_source`` parameter, a
+``[instance_info]/boot_iso`` parameter can be supplied. The image will
+be downloaded by the conductor, and the instance will be booted using
+the supplied ISO image. In accordance with the ``ramdisk`` deployment
+interface behavior, once booted the machine will have a ``provision_state``
+of ``ACTIVE``.
+
+.. code-block:: bash
+
+ openstack baremetal node set \
+ --instance-info boot_iso=http://url/to.iso node-0
+
+This initial interface does not support bootloader configuration
+parameter injection; as such, the ``[instance_info]/kernel_append_params``
+setting is ignored.
+
.. _Redfish: http://redfish.dmtf.org/
.. _Sushy: https://opendev.org/openstack/sushy
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
diff --git a/doc/source/admin/fast-track.rst b/doc/source/admin/fast-track.rst
new file mode 100644
index 000000000..464966da8
--- /dev/null
+++ b/doc/source/admin/fast-track.rst
@@ -0,0 +1,50 @@
+=====================
+Fast-Track Deployment
+=====================
+
+*Fast track* is a mode of operation where the Bare Metal service keeps a
+machine powered on with the agent running between provisioning operations.
+It is first booted during in-band inspection or cleaning (whichever happens
+first) and is only shut down before rebooting into the final instance.
+Depending on the configuration, this mode can save several reboots and is
+particularly useful for scenarios where nodes are enrolled, prepared and
+provisioned within a short period of time.
+
+.. warning::
+ Fast track deployment targets standalone use cases and is only tested with
+ the ``noop`` network interface. The case where inspection, cleaning and
+ provisioning networks are different is not supported.
+
+Enabling
+========
+
+Fast track is off by default and should be enabled in the configuration:
+
+.. code-block:: ini
+
+ [deploy]
+ fast_track = true
+
+Inspection
+----------
+
+If using :ref:`in-band inspection`, you need to tell ironic-inspector not to
+power off nodes afterwards. Depending on the inspection mode (managed or
+unmanaged), this needs to be configured in two places. In ``ironic.conf``:
+
+.. code-block:: ini
+
+ [inspector]
+ power_off = false
+
+And in ``inspector.conf``:
+
+.. code-block:: ini
+
+ [processing]
+ power_off = false
+
+Finally, you need to update the :ironic-inspector-doc:`inspection PXE
+configuration <install/index.html#configuration>` to include the
+``ipa-api-url`` kernel parameter, pointing at the **ironic** endpoint, in
+addition to the existing ``ipa-inspection-callback-url``.
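+
+For example, with a PXELINUX-style configuration the resulting kernel command
+line might look like this (the URLs here are illustrative)::
+
+ append initrd=agent.ramdisk ipa-inspection-callback-url=http://192.0.2.1:5050/v1/continue ipa-api-url=http://192.0.2.1:6385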
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 4815be3c3..5154aa39b 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -23,18 +23,27 @@ the services.
Port Groups <portgroups>
Configuring Web or Serial Console <console>
Enabling Notifications <notifications>
- Ceph Object Gateway <radosgw>
- Emitting Software Metrics <metrics>
- Auditing API Traffic <api-audit-support>
- Service State Reporting <gmr>
Conductor Groups <conductor-groups>
Upgrade Guide <upgrade-guide>
Security <security>
- Windows Images <building-windows-images>
Troubleshooting FAQ <troubleshooting>
Power Sync with the Compute Service <power-sync>
- Agent Token <agent-token>
Node Multi-Tenancy <node-multitenancy>
+ Fast-Track Deployment <fast-track>
+
+Advanced Topics
+---------------
+
+.. toctree::
+ :maxdepth: 1
+
+ Ceph Object Gateway <radosgw>
+ Windows Images <building-windows-images>
+ Emitting Software Metrics <metrics>
+ Auditing API Traffic <api-audit-support>
+ Service State Reporting <gmr>
+ Agent Token <agent-token>
+ Deploying without BMC Credentials <agent-power>
.. toctree::
:hidden:
diff --git a/doc/source/admin/inspection.rst b/doc/source/admin/inspection.rst
index f403c0bbc..41605f1b1 100644
--- a/doc/source/admin/inspection.rst
+++ b/doc/source/admin/inspection.rst
@@ -68,6 +68,8 @@ for scheduling::
Please see a specific :doc:`hardware type page </admin/drivers>` for
the exact list of capabilities this hardware type can discover.
+.. _in-band inspection:
+
In-band inspection
------------------
diff --git a/doc/source/admin/node-deployment.rst b/doc/source/admin/node-deployment.rst
index 39dbc28a5..3136685ed 100644
--- a/doc/source/admin/node-deployment.rst
+++ b/doc/source/admin/node-deployment.rst
@@ -40,15 +40,49 @@ BIOS, and RAID interfaces.
.. _node-deployment-core-steps:
-Core steps
-----------
-
-Certain default deploy steps are designated as 'core' deploy steps. The
-following deploy steps are core:
-
-``deploy.deploy``
- In this step the node is booted using a provisioning image, and the user
- image is written to the node's disk. It has a priority of 100.
+Agent steps
+-----------
+
+All deploy interfaces based on ironic-python-agent (i.e. ``direct``, ``iscsi``,
+``ansible`` and any derivatives) expose the following deploy steps:
+
+``deploy.deploy`` (priority 100)
+ In this step the node is booted using a provisioning image.
+``deploy.write_image`` (priority 80)
+ An out-of-band (``iscsi``, ``ansible``) or in-band (``direct``) step that
+ downloads and writes the image to the node.
+``deploy.tear_down_agent`` (priority 40)
+ In this step the provisioning image is shut down.
+``deploy.switch_to_tenant_network`` (priority 30)
+ In this step networking for the node is switched from provisioning to
+ tenant networks.
+``deploy.boot_instance`` (priority 20)
+ In this step the node is booted into the user image.
+
+Additionally, the ``iscsi`` and ``direct`` deploy interfaces have:
+
+``deploy.prepare_instance_boot`` (priority 60)
+ In this step the boot device is configured and the bootloader is installed.
+
+ .. note::
+ For the ``ansible`` deploy interface these steps are done in
+ ``deploy.write_image``.
+
+Accordingly, the following priority ranges can be used for custom deploy steps:
+
+> 100
+ Out-of-band steps to run before deployment.
+81 to 99
+ In-band deploy steps to run before the image is written.
+61 to 79
+ In-band deploy steps to run after the image is written but before the
+ bootloader is installed.
+41 to 59
+ In-band steps to run after the image is written and the bootloader is
+ installed.
+21 to 39
+ Out-of-band steps to run after the provisioning image is shut down.
+1 to 19
+ Any steps that are run when the user instance is already running.
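+
+Custom in-band steps in these ranges can then be requested through a deploy
+template; a hypothetical example (the trait and step names are illustrative):
+
+.. code-block:: bash
+
+ openstack baremetal deploy template create CUSTOM_HYPOTHETICAL_STEP \
+ --steps '[{"interface": "deploy", "step": "my_custom_step",
+ "args": {}, "priority": 90}]'
+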
Writing a Deploy Step
---------------------
diff --git a/doc/source/admin/raid.rst b/doc/source/admin/raid.rst
index 5d32f170f..38753d698 100644
--- a/doc/source/admin/raid.rst
+++ b/doc/source/admin/raid.rst
@@ -410,7 +410,17 @@ have its root file system on the first partition. Starting with Ussuri,
the image can also have additional metadata to point Ironic to the
partition with the root file system: for this, the image needs to set
the ``rootfs_uuid`` property with the file system UUID of the root file
-system. The pre-Ussuri approach, i.e. to have the root file system on
+system. One way to extract this UUID from an existing image is to
+download the image, mount it as a loopback device, and use ``blkid``:
+
+.. code-block:: bash
+
+ $ sudo losetup -f
+ $ sudo losetup /dev/loop0 /tmp/myimage.raw
+ $ sudo kpartx -a /dev/loop0
+ $ blkid
+
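+Assuming the image is registered with the Image service, one way to attach
+the resulting property is, for example:
+
+.. code-block:: bash
+
+ openstack image set --property rootfs_uuid=<uuid> <image>
+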
+The pre-Ussuri approach, i.e. to have the root file system on
the first partition, is kept as a fallback and hence allows software
RAID deployments where Ironic does not have access to any image metadata
(e.g. Ironic stand-alone).
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index 1ac680e1f..0c29343c8 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -559,3 +559,80 @@ waiting for an event that is never happening. In these cases, it might be
helpful to connect to the IPA and inspect its logs, see the trouble shooting
guide of the :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` on how
to do this.
+
+Deployments fail with "failed to update MAC address"
+====================================================
+
+The design of the integration with the Networking service (neutron) is such
+that once virtual ports have been created in the API, their MAC address must
+be updated in order for the DHCP server to be able to appropriately reply.
+
+This can sometimes result in errors being raised indicating that the MAC
+address is already in use. This is because at some point in the past, a
+virtual interface was orphaned either by accident or by some unexpected
+glitch, and a previous entry is still present in Neutron.
+
+This error looks something like this when reported in the ironic-conductor
+log output::
+
+ Failed to update MAC address on Neutron port 305beda7-0dd0-4fec-b4d2-78b7aa4e8e6a.: MacAddressInUseClient: Unable to complete operation for network 1e252627-6223-4076-a2b9-6f56493c9bac. The mac address 52:54:00:7c:c4:56 is in use.
+
+Because ironic has no knowledge of this orphaned entry, the deployment process
+fails, as ironic cannot safely make the assumptions that would be required to
+resolve the conflict automatically.
+
+How did I get here?
+-------------------
+
+Originally this was a fairly easy issue to encounter. The retry logic between
+the Orchestration (heat) and Compute (nova) services could sometimes result
+in additional, unnecessary ports being created.
+
+Bugs of this class have been largely resolved since the Rocky development
+cycle. Since then, this situation is typically encountered when Networking
+(neutron) VIF attachments are not removed or deleted prior to deleting a
+port in the Bare Metal service.
+
+Ultimately, the key issue is that the port is being deleted. Under most
+operating circumstances, there is no need to delete the port, and because
+VIF attachments are stored on the port object, deleting the port
+*can* result in the VIF not being cleaned up in Neutron.
+
+Under normal circumstances, when deleting ports, a node should be in a
+stable state, and the node should not be provisioned. If the
+``openstack baremetal port delete`` command fails, this may indicate that
+a known VIF is still attached. Generally, if the attachments are transitory
+from cleaning, provisioning, rescuing, or even inspection, getting the node to
+the ``available`` state will unblock your delete operation, unless there is
+a tenant VIF attachment. In that case, the VIF will need to be removed from
+within the Bare Metal service using the
+``openstack baremetal node vif detach`` command.
+
+A port can also be checked to see if there is a VIF attachment by consulting
+the port's ``internal_info`` field.
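+
+For example, the attachment can be checked and, if necessary, detached
+(the identifiers here are illustrative)::
+
+ openstack baremetal port show <port-uuid> -f value -c internal_info
+ openstack baremetal node vif detach <node> <vif-id>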
+
+.. warning::
+ The ``maintenance`` flag can be used to force the node's port to be
+ deleted; however, this will disable any check that would normally block
+ the user from issuing a delete and accidentally orphaning the VIF attachment
+ record.
+
+How do I resolve this?
+----------------------
+
+Generally, you need to identify the port with the offending MAC address.
+Example::
+
+ openstack port list --mac-address 52:54:00:7c:c4:56
+
+From the command's output, you should be able to identify the ``id`` field.
+Using that, you can delete the port. Example::
+
+ openstack port delete <id>
+
+.. warning::
+ Before deleting a port, you should always verify that it is no longer in
+ use or no longer seems applicable/operable. If multiple deployments of
+ the Bare Metal service share a single Neutron, an inventory typo, or
+ possibly even a duplicate MAC address, could also produce the same basic
+ error message.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ca13aa69c..1f667a4b4 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -34,8 +34,6 @@ sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts'))
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.viewcode',
'sphinx.ext.graphviz',
- 'sphinxcontrib.httpdomain',
- 'sphinxcontrib.pecanwsme.rest',
'sphinxcontrib.seqdiag',
'sphinxcontrib.apidoc',
'sphinxcontrib.rsvgconverter',
diff --git a/doc/source/contributor/releasing.rst b/doc/source/contributor/releasing.rst
index dd3744e0f..71a91a695 100644
--- a/doc/source/contributor/releasing.rst
+++ b/doc/source/contributor/releasing.rst
@@ -137,6 +137,97 @@ Things to do before releasing
Otherwise, once it is made, CI (the grenade job that tests new-release ->
master) will fail.
+* Check for any open patches that are close to being merged or are release
+ critical.
+
+ This usually includes important bug fixes and/or features that we'd like to
+ release, including the related documentation.
+
+How to propose a release
+========================
+
+The steps that lead to a release proposal are mainly manual, while proposing
+the release itself is an almost fully automated process, accomplished by
+following these steps:
+
+* Clone the `openstack/releases <https://opendev.org/openstack/releases>`_
+ repository. This is where deliverables are tracked and all the automation
+ resides.
+
+ * Under the ``deliverables`` directory you can see yaml files for each
+ deliverable (i.e. subproject) grouped by release cycles.
+
+ * The ``_independent`` directory contains yaml files for deliverables that
+ are not bound to (official) cycles (e.g. ironic-python-agent-builder).
+
+* To check the changes we're about to release, we can use the script
+ ``list_unreleased_changes.sh``, which can be found under the ``tools``
+ directory, with this syntax:
+
+ .. code-block:: bash
+
+ list_unreleased_changes.sh <branch> <deliverable>
+
+ The ``branch`` argument is a branch, not a release series (i.e. master or
+ stable/train, not ussuri or train).
+
+ For example, assuming we're in the main directory of the releases repository,
+ to check the changes on the stable/train branch of ironic-python-agent,
+ type:
+
+ .. code-block:: bash
+
+ ./tools/list_unreleased_changes.sh stable/train openstack/ironic-python-agent
+
+* To update the deliverable file for the new release, we use a scripted process
+ in the form of a tox environment called ``new-release``.
+
+ To get familiar with it and see all the options, type:
+
+ .. code-block:: bash
+
+ tox -e venv -- new-release -h
+
+ Now, based on the list of changes we found in the preceding step, and the
+ release notes, we need to decide whether the next version will be major,
+ minor (feature) or patch (bugfix).
+
+ Note that in this case ``series`` is a code name (train, ussuri), not a
+ branch.
+
+ The ``--stable-branch`` argument is used only for branching at the end of a
+ cycle; independent projects are not branched this way, though.
+
+ To propose the release, use the script to update the deliverable file, then
+ commit the change, and propose it for review.
+
+ For example, to propose a minor release for ironic in the master branch use:
+
+ .. code-block:: bash
+
+ tox -e venv -- new-release -v master ironic feature
+
+ Remember to use a meaningful topic, usually using the name of the
+ deliverable, the new version and the branch, if applicable.
+
+ A good commit message title should also include the same, for example
+ "Release ironic 1.2.3 for ussuri"
+
+* As an optional step, we can use ``tox -e list-changes`` to double-check the
+ changes before submitting them for review.
+
+ Also ``tox -e validate`` (it might take a while to run based on the number of
+ changes) does some sanity checks, but since everything is scripted,
+ there shouldn't be any issue.
+
+ All the scripts are designed and maintained by the release team; in case of
+ questions, doubts, or errors, you can reach out to them in
+ the IRC channel ``#openstack-release``; all release liaisons should be
+ present there.
+
+* After the change is up for review, the PTL or a release liaison will have
+ to acknowledge it before it can be approved by the release team. Then, it
+ will be processed automatically by Zuul.
+
Things to do after releasing
============================
diff --git a/doc/source/install/configure-pxe.rst b/doc/source/install/configure-pxe.rst
index 56f345eff..291b101f3 100644
--- a/doc/source/install/configure-pxe.rst
+++ b/doc/source/install/configure-pxe.rst
@@ -357,41 +357,59 @@ on the Bare Metal service node(s) where ``ironic-conductor`` is running.
Ubuntu::
- cp /usr/lib/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot
+ cp /usr/lib/ipxe/{undionly.kpxe,ipxe.efi,snponly.efi} /tftpboot
Fedora/RHEL7/CentOS7::
- cp /usr/share/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot
+ cp /usr/share/ipxe/{undionly.kpxe,ipxe.efi,snponly.efi} /tftpboot
-#. Enable/Configure iPXE in the Bare Metal Service's configuration file
- (/etc/ironic/ironic.conf):
+#. Enable/Configure iPXE overrides in the Bare Metal Service's configuration
+ file **if required** (/etc/ironic/ironic.conf):
.. code-block:: ini
[pxe]
- # Enable iPXE boot. (boolean value)
- ipxe_enabled=True
-
# Neutron bootfile DHCP parameter. (string value)
- pxe_bootfile_name=undionly.kpxe
+ ipxe_bootfile_name=undionly.kpxe
# Bootfile DHCP parameter for UEFI boot mode. (string value)
- uefi_pxe_bootfile_name=ipxe.efi
+ uefi_ipxe_bootfile_name=ipxe.efi
# Template file for PXE configuration. (string value)
- pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+ ipxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+
+ .. note::
+ Most UEFI systems have integrated networking, which means the
+ ``[pxe]uefi_ipxe_bootfile_name`` setting should be set to
+ ``snponly.efi``.
+
+ .. note::
+ Setting the iPXE parameters noted in the code block above to no value,
+ in other words setting a line to something like ``ipxe_bootfile_name=``,
+ will result in ironic falling back to the default values of the non-iPXE
+ PXE settings. This is for backwards compatibility.
+
+#. Ensure iPXE is the default PXE, if applicable.
- # Template file for PXE configuration for UEFI boot loader.
- # (string value)
- uefi_pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+ In earlier versions of ironic, a ``[pxe]ipxe_enabled`` setting allowed
+ operators to make the conductor operate exclusively as if only iPXE were
+ to be used. As time moved on, iPXE functionality was moved to its own
+ ``ipxe`` boot interface.
+
+ If you want to emulate that same behavior, set the following in the
+ configuration file (/etc/ironic/ironic.conf):
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ default_boot_interface=ipxe
+ enabled_boot_interfaces=ipxe,pxe
.. note::
- The ``[pxe]ipxe_enabled`` option has been deprecated and will be removed
- in the T* development cycle. Users should instead consider use of the
- ``ipxe`` boot interface. The same default use of iPXE functionality can
- be achieved by setting the ``[DEFAULT]default_boot_interface`` option
- to ``ipxe``.
+ The ``[DEFAULT]enabled_boot_interfaces`` setting may be exclusively set
+ to ``ipxe``; however, ironic has multiple boot interfaces available
+ depending on the hardware types available for use.
#. It is possible to configure the Bare Metal service in such a way
that nodes will boot into the deploy image directly from Object Storage.
@@ -442,7 +460,6 @@ on the Bare Metal service node(s) where ``ironic-conductor`` is running.
sudo service ironic-conductor restart
-
PXE multi-architecture setup
----------------------------
@@ -498,6 +515,10 @@ nodes will be deployed by 'grubaa64.efi', and ppc64 nodes by 'bootppc64'::
commands, you'll need to switch to use ``linux`` and ``initrd`` command
instead.
+.. note::
+ A ``[pxe]ipxe_bootfile_name_by_arch`` setting is available for multi-arch
+ iPXE based deployment, and defaults to the same behavior as the comparable
+ ``[pxe]pxe_bootfile_name_by_arch`` setting for standard PXE.
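+
+ As an illustration, such a mapping might look like the following (the
+ values here are hypothetical):
+
+ .. code-block:: ini
+
+ [pxe]
+ ipxe_bootfile_name_by_arch = aarch64:ipxe-aa64.efi,x86_64:ipxe.efi
+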
PXE timeouts tuning
-------------------
diff --git a/doc/source/install/enrollment.rst b/doc/source/install/enrollment.rst
index 4d6b0a4b2..1e0f9957e 100644
--- a/doc/source/install/enrollment.rst
+++ b/doc/source/install/enrollment.rst
@@ -250,6 +250,13 @@ and may be combined if desired.
$ openstack baremetal port create $MAC_ADDRESS --node $NODE_UUID
+ .. note::
+ When it is time to remove the node from the Bare Metal service, the
+ command used to remove the port is ``openstack baremetal port delete
+ <port uuid>``. When doing so, it is important to ensure that the
+ baremetal node is not in ``maintenance``, as the guarding logic that
+ prevents orphaning Neutron Virtual Interfaces (VIFs) is otherwise
+ overridden.
+
.. _enrollment-scheduling:
Adding scheduling information
diff --git a/ironic/api/args.py b/ironic/api/args.py
new file mode 100644
index 000000000..7addecf8b
--- /dev/null
+++ b/ironic/api/args.py
@@ -0,0 +1,381 @@
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
+#
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import decimal
+import json
+import logging
+
+from dateutil import parser as dateparser
+
+from ironic.api import types as atypes
+from ironic.common import exception
+
+LOG = logging.getLogger(__name__)
+
+
+CONTENT_TYPE = 'application/json'
+ACCEPT_CONTENT_TYPES = [
+ CONTENT_TYPE,
+ 'text/javascript',
+ 'application/javascript'
+]
+ENUM_TRUE = ('true', 't', 'yes', 'y', 'on', '1')
+ENUM_FALSE = ('false', 'f', 'no', 'n', 'off', '0')
+
+
+def fromjson_array(datatype, value):
+ if not isinstance(value, list):
+ raise ValueError("Value not a valid list: %s" % value)
+ return [fromjson(datatype.item_type, item) for item in value]
+
+
+def fromjson_dict(datatype, value):
+ if not isinstance(value, dict):
+ raise ValueError("Value not a valid dict: %s" % value)
+ return dict((
+ (fromjson(datatype.key_type, item[0]),
+ fromjson(datatype.value_type, item[1]))
+ for item in value.items()))
+
+
+def fromjson_bool(value):
+ if isinstance(value, (int, bool)):
+ return bool(value)
+ if value in ENUM_TRUE:
+ return True
+ if value in ENUM_FALSE:
+ return False
+ raise ValueError("Value not an unambiguous boolean: %s" % value)
+
+
+def fromjson(datatype, value):
+ """A generic converter from json base types to python datatype.
+
+ """
+ if value is None:
+ return None
+
+ if isinstance(datatype, atypes.ArrayType):
+ return fromjson_array(datatype, value)
+
+ if isinstance(datatype, atypes.DictType):
+ return fromjson_dict(datatype, value)
+
+ if datatype is bytes:
+ if isinstance(value, (str, int, float)):
+ return str(value).encode('utf8')
+ return value
+
+ if datatype is str:
+ if isinstance(value, bytes):
+ return value.decode('utf-8')
+ return value
+
+ if datatype in (int, float):
+ return datatype(value)
+
+ if datatype is bool:
+ return fromjson_bool(value)
+
+ if datatype is decimal.Decimal:
+ return decimal.Decimal(value)
+
+ if datatype is datetime.datetime:
+ return dateparser.parse(value)
+
+ if atypes.iscomplex(datatype):
+ return fromjson_complex(datatype, value)
+
+ if atypes.isusertype(datatype):
+ return datatype.frombasetype(fromjson(datatype.basetype, value))
+
+ return value
+
+
+def fromjson_complex(datatype, value):
+ obj = datatype()
+ attributes = atypes.list_attributes(datatype)
+
+ # Here we check that all the attributes in the value are also defined
+ # in our type definition, otherwise we raise an Error.
+ v_keys = set(value.keys())
+ a_keys = set(adef.name for adef in attributes)
+ if not v_keys <= a_keys:
+ raise exception.UnknownAttribute(None, v_keys - a_keys)
+
+ for attrdef in attributes:
+ if attrdef.name in value:
+ try:
+ val_fromjson = fromjson(attrdef.datatype,
+ value[attrdef.name])
+ except exception.UnknownAttribute as e:
+ e.add_fieldname(attrdef.name)
+ raise
+ if getattr(attrdef, 'readonly', False):
+ raise exception.InvalidInput(attrdef.name, val_fromjson,
+ "Cannot set read only field.")
+ setattr(obj, attrdef.key, val_fromjson)
+ elif attrdef.mandatory:
+ raise exception.InvalidInput(attrdef.name, None,
+ "Mandatory field missing.")
+
+ return atypes.validate_value(datatype, obj)
+
+
+def parse(s, datatypes, bodyarg, encoding='utf8'):
+ jload = json.load
+ if not hasattr(s, 'read'):
+ if isinstance(s, bytes):
+ s = s.decode(encoding)
+ jload = json.loads
+ try:
+ jdata = jload(s)
+ except ValueError:
+ raise exception.ClientSideError("Request is not in valid JSON format")
+ if bodyarg:
+ argname = list(datatypes.keys())[0]
+ try:
+ kw = {argname: fromjson(datatypes[argname], jdata)}
+ except ValueError as e:
+ raise exception.InvalidInput(argname, jdata, e.args[0])
+ except exception.UnknownAttribute as e:
+ # We only know the fieldname at this level, not in the
+ # called function. We fill in this information here.
+ e.add_fieldname(argname)
+ raise
+ else:
+ kw = {}
+ extra_args = []
+ if not isinstance(jdata, dict):
+ raise exception.ClientSideError("Request must be a JSON dict")
+ for key in jdata:
+ if key not in datatypes:
+ extra_args.append(key)
+ else:
+ try:
+ kw[key] = fromjson(datatypes[key], jdata[key])
+ except ValueError as e:
+ raise exception.InvalidInput(key, jdata[key], e.args[0])
+ except exception.UnknownAttribute as e:
+ # We only know the fieldname at this level, not in the
+ # called function. We fill in this information here.
+ e.add_fieldname(key)
+ raise
+ if extra_args:
+ raise exception.UnknownArgument(', '.join(extra_args))
+ return kw
+
+
+def from_param(datatype, value):
+ if datatype is datetime.datetime:
+ return dateparser.parse(value) if value else None
+
+ if isinstance(datatype, atypes.UserType):
+ return datatype.frombasetype(
+ from_param(datatype.basetype, value))
+
+ if isinstance(datatype, atypes.ArrayType):
+ if value is None:
+ return value
+ return [
+ from_param(datatype.item_type, item)
+ for item in value
+ ]
+
+ return datatype(value) if value is not None else None
+
+
+def from_params(datatype, params, path, hit_paths):
+ if isinstance(datatype, atypes.ArrayType):
+ return array_from_params(datatype, params, path, hit_paths)
+
+ if isinstance(datatype, atypes.UserType):
+ return usertype_from_params(datatype, params, path, hit_paths)
+
+ if path in params:
+ assert not isinstance(datatype, atypes.DictType), \
+ 'DictType unsupported'
+ assert not atypes.iscomplex(datatype) or datatype is atypes.File, \
+ 'complex type unsupported'
+ hit_paths.add(path)
+ return from_param(datatype, params[path])
+ return atypes.Unset
+
+
+def array_from_params(datatype, params, path, hit_paths):
+ if hasattr(params, 'getall'):
+ # webob multidict
+ def getall(params, path):
+ return params.getall(path)
+ elif hasattr(params, 'getlist'):
+ # werkzeug multidict
+ def getall(params, path): # noqa
+ return params.getlist(path)
+ if path in params:
+ hit_paths.add(path)
+ return [
+ from_param(datatype.item_type, value)
+ for value in getall(params, path)]
+
+ return atypes.Unset
+
+
+def usertype_from_params(datatype, params, path, hit_paths):
+ if path in params:
+ hit_paths.add(path)
+ value = from_param(datatype.basetype, params[path])
+ if value is not atypes.Unset:
+ return datatype.frombasetype(value)
+ return atypes.Unset
+
+
+def args_from_args(funcdef, args, kwargs):
+ newargs = []
+ for argdef, arg in zip(funcdef.arguments[:len(args)], args):
+ try:
+ newargs.append(from_param(argdef.datatype, arg))
+ except Exception as e:
+ if isinstance(argdef.datatype, atypes.UserType):
+ datatype_name = argdef.datatype.name
+ elif isinstance(argdef.datatype, type):
+ datatype_name = argdef.datatype.__name__
+ else:
+ datatype_name = argdef.datatype.__class__.__name__
+ raise exception.InvalidInput(
+ argdef.name,
+ arg,
+ "unable to convert to %(datatype)s. Error: %(error)s" % {
+ 'datatype': datatype_name, 'error': e})
+ newkwargs = {}
+ for argname, value in kwargs.items():
+ newkwargs[argname] = from_param(
+ funcdef.get_arg(argname).datatype, value
+ )
+ return newargs, newkwargs
+
+
+def args_from_params(funcdef, params):
+ kw = {}
+ hit_paths = set()
+ for argdef in funcdef.arguments:
+ value = from_params(
+ argdef.datatype, params, argdef.name, hit_paths)
+ if value is not atypes.Unset:
+ kw[argdef.name] = value
+ paths = set(params.keys())
+ unknown_paths = paths - hit_paths
+ if '__body__' in unknown_paths:
+ unknown_paths.remove('__body__')
+ if not funcdef.ignore_extra_args and unknown_paths:
+ raise exception.UnknownArgument(', '.join(unknown_paths))
+ return [], kw
+
+
+def args_from_body(funcdef, body, mimetype):
+ if funcdef.body_type is not None:
+ datatypes = {funcdef.arguments[-1].name: funcdef.body_type}
+ else:
+ datatypes = dict(((a.name, a.datatype) for a in funcdef.arguments))
+
+ if not body:
+ return (), {}
+
+ if mimetype == "application/x-www-form-urlencoded":
+ # the parameters should have been parsed in params
+ return (), {}
+ elif mimetype not in ACCEPT_CONTENT_TYPES:
+ raise exception.ClientSideError("Unknown mimetype: %s" % mimetype,
+ status_code=415)
+
+ try:
+ kw = parse(
+ body, datatypes, bodyarg=funcdef.body_type is not None
+ )
+ except exception.UnknownArgument:
+ if not funcdef.ignore_extra_args:
+ raise
+ kw = {}
+
+ return (), kw
+
+
+def combine_args(funcdef, akw, allow_override=False):
+ newargs, newkwargs = [], {}
+ for args, kwargs in akw:
+ for i, arg in enumerate(args):
+ n = funcdef.arguments[i].name
+ if not allow_override and n in newkwargs:
+ raise exception.ClientSideError(
+ "Parameter %s was given several times" % n)
+ newkwargs[n] = arg
+ for name, value in kwargs.items():
+ n = str(name)
+ if not allow_override and n in newkwargs:
+ raise exception.ClientSideError(
+ "Parameter %s was given several times" % n)
+ newkwargs[n] = value
+ return newargs, newkwargs
+
+
+def get_args(funcdef, args, kwargs, params, body, mimetype):
+ """Combine arguments from multiple sources
+
+ Combine arguments from:
+ * the host framework args and kwargs
+ * the request params
+ * the request body
+
+ Note that the host framework args and kwargs can be overridden
+ by arguments from params or body.
+
+ """
+ # get the body from params if not given directly
+ if not body and '__body__' in params:
+ body = params['__body__']
+
+ # extract args from the host args and kwargs
+ from_args = args_from_args(funcdef, args, kwargs)
+
+ # extract args from the request parameters
+ from_params = args_from_params(funcdef, params)
+
+ # extract args from the request body
+ from_body = args_from_body(funcdef, body, mimetype)
+
+ # combine params and body arguments
+ from_params_and_body = combine_args(
+ funcdef,
+ (from_params, from_body)
+ )
+
+ args, kwargs = combine_args(
+ funcdef,
+ (from_args, from_params_and_body),
+ allow_override=True
+ )
+ check_arguments(funcdef, args, kwargs)
+ return args, kwargs
+
+
+def check_arguments(funcdef, args, kw):
+ """Check if some arguments are missing"""
+ assert len(args) == 0
+ for arg in funcdef.arguments:
+ if arg.mandatory and arg.name not in kw:
+ raise exception.MissingArgument(arg.name)
diff --git a/ironic/api/controllers/link.py b/ironic/api/controllers/link.py
index 8f2549c9b..490a78ab5 100644
--- a/ironic/api/controllers/link.py
+++ b/ironic/api/controllers/link.py
@@ -14,8 +14,6 @@
# under the License.
from ironic import api
-from ironic.api.controllers import base
-from ironic.api import types as atypes
def build_url(resource, resource_args, bookmark=False, base_url=None):
@@ -30,28 +28,15 @@ def build_url(resource, resource_args, bookmark=False, base_url=None):
return template % {'url': base_url, 'res': resource, 'args': resource_args}
-class Link(base.Base):
- """A link representation."""
-
- href = str
- """The url of a link."""
-
- rel = str
- """The name of a link."""
-
- type = str
- """Indicates the type of document/link."""
-
- @staticmethod
- def make_link(rel_name, url, resource, resource_args,
- bookmark=False, type=atypes.Unset):
- href = build_url(resource, resource_args,
- bookmark=bookmark, base_url=url)
- return Link(href=href, rel=rel_name, type=type)
-
- @classmethod
- def sample(cls):
- sample = cls(href="http://localhost:6385/chassis/"
- "eaaca217-e7d8-47b4-bb41-3f99f20eed89",
- rel="bookmark")
- return sample
+def make_link(rel_name, url, resource, resource_args,
+ bookmark=False, type=None):
+ """Build a dict representing a link"""
+ href = build_url(resource, resource_args,
+ bookmark=bookmark, base_url=url)
+ link_dict = {
+ 'href': href,
+ 'rel': rel_name
+ }
+ if type:
+ link_dict['type'] = type
+ return link_dict
diff --git a/ironic/api/controllers/root.py b/ironic/api/controllers/root.py
index 42308fd82..440e10000 100644
--- a/ironic/api/controllers/root.py
+++ b/ironic/api/controllers/root.py
@@ -17,53 +17,28 @@
import pecan
from pecan import rest
-from ironic.api.controllers import base
from ironic.api.controllers import v1
from ironic.api.controllers import version
-from ironic.api import expose
+from ironic.api import method
-class Root(base.Base):
-
- name = str
- """The name of the API"""
-
- description = str
- """Some information about this API"""
-
- versions = [version.Version]
- """Links to all the versions available in this API"""
-
- default_version = version.Version
- """A link to the default version of the API"""
-
- @staticmethod
- def convert():
- root = Root()
- root.name = "OpenStack Ironic API"
- root.description = ("Ironic is an OpenStack project which aims to "
- "provision baremetal machines.")
- root.default_version = version.default_version()
- root.versions = [root.default_version]
- return root
+def root():
+ return {
+ 'name': "OpenStack Ironic API",
+ 'description': ("Ironic is an OpenStack project which aims to "
+ "provision baremetal machines."),
+ 'default_version': version.default_version(),
+ 'versions': version.all_versions()
+ }
class RootController(rest.RestController):
- _versions = [version.ID_VERSION1]
- """All supported API versions"""
-
- _default_version = version.ID_VERSION1
- """The default API version"""
-
v1 = v1.Controller()
- @expose.expose(Root)
+ @method.expose()
def get(self):
- # NOTE: The reason why convert() it's being called for every
- # request is because we need to get the host url from
- # the request object to make the links.
- return Root.convert()
+ return root()
@pecan.expose()
def _route(self, args, request=None):
@@ -73,6 +48,6 @@ class RootController(rest.RestController):
if the version number is not specified in the url.
"""
- if args[0] and args[0] not in self._versions:
- args = [self._default_version] + args
+ if args[0] and args[0] != version.ID_VERSION1:
+ args = [version.ID_VERSION1] + args
return super(RootController, self)._route(args, request)
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index cd568881f..b0be184f9 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -77,158 +77,158 @@ class V1(base.Base):
media_types = [MediaType]
"""An array of supported media types for this version"""
- links = [link.Link]
+ links = None
"""Links that point to a specific URL for this version and documentation"""
- chassis = [link.Link]
+ chassis = None
"""Links to the chassis resource"""
- nodes = [link.Link]
+ nodes = None
"""Links to the nodes resource"""
- ports = [link.Link]
+ ports = None
"""Links to the ports resource"""
- portgroups = [link.Link]
+ portgroups = None
"""Links to the portgroups resource"""
- drivers = [link.Link]
+ drivers = None
"""Links to the drivers resource"""
- volume = [link.Link]
+ volume = None
"""Links to the volume resource"""
- lookup = [link.Link]
+ lookup = None
"""Links to the lookup resource"""
- heartbeat = [link.Link]
+ heartbeat = None
"""Links to the heartbeat resource"""
- conductors = [link.Link]
+ conductors = None
"""Links to the conductors resource"""
- allocations = [link.Link]
+ allocations = None
"""Links to the allocations resource"""
- deploy_templates = [link.Link]
+ deploy_templates = None
"""Links to the deploy_templates resource"""
- version = version.Version
+ version = None
"""Version discovery information."""
- events = [link.Link]
+ events = None
"""Links to the events resource"""
@staticmethod
def convert():
v1 = V1()
v1.id = "v1"
- v1.links = [link.Link.make_link('self', api.request.public_url,
- 'v1', '', bookmark=True),
- link.Link.make_link('describedby',
- 'https://docs.openstack.org',
- '/ironic/latest/contributor/',
- 'webapi.html',
- bookmark=True, type='text/html')
+ v1.links = [link.make_link('self', api.request.public_url,
+ 'v1', '', bookmark=True),
+ link.make_link('describedby',
+ 'https://docs.openstack.org',
+ '/ironic/latest/contributor/',
+ 'webapi.html',
+ bookmark=True, type='text/html')
]
v1.media_types = [MediaType('application/json',
'application/vnd.openstack.ironic.v1+json')]
- v1.chassis = [link.Link.make_link('self', api.request.public_url,
- 'chassis', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'chassis', '',
- bookmark=True)
+ v1.chassis = [link.make_link('self', api.request.public_url,
+ 'chassis', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'chassis', '',
+ bookmark=True)
]
- v1.nodes = [link.Link.make_link('self', api.request.public_url,
- 'nodes', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'nodes', '',
- bookmark=True)
+ v1.nodes = [link.make_link('self', api.request.public_url,
+ 'nodes', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'nodes', '',
+ bookmark=True)
]
- v1.ports = [link.Link.make_link('self', api.request.public_url,
- 'ports', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'ports', '',
- bookmark=True)
+ v1.ports = [link.make_link('self', api.request.public_url,
+ 'ports', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'ports', '',
+ bookmark=True)
]
if utils.allow_portgroups():
v1.portgroups = [
- link.Link.make_link('self', api.request.public_url,
- 'portgroups', ''),
- link.Link.make_link('bookmark', api.request.public_url,
- 'portgroups', '', bookmark=True)
+ link.make_link('self', api.request.public_url,
+ 'portgroups', ''),
+ link.make_link('bookmark', api.request.public_url,
+ 'portgroups', '', bookmark=True)
]
- v1.drivers = [link.Link.make_link('self', api.request.public_url,
- 'drivers', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', '',
- bookmark=True)
+ v1.drivers = [link.make_link('self', api.request.public_url,
+ 'drivers', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', '',
+ bookmark=True)
]
if utils.allow_volume():
v1.volume = [
- link.Link.make_link('self',
- api.request.public_url,
- 'volume', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'volume', '',
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'volume', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'volume', '',
+ bookmark=True)
]
if utils.allow_ramdisk_endpoints():
- v1.lookup = [link.Link.make_link('self', api.request.public_url,
- 'lookup', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'lookup', '',
- bookmark=True)
+ v1.lookup = [link.make_link('self', api.request.public_url,
+ 'lookup', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'lookup', '',
+ bookmark=True)
]
- v1.heartbeat = [link.Link.make_link('self',
- api.request.public_url,
- 'heartbeat', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'heartbeat', '',
- bookmark=True)
+ v1.heartbeat = [link.make_link('self',
+ api.request.public_url,
+ 'heartbeat', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'heartbeat', '',
+ bookmark=True)
]
if utils.allow_expose_conductors():
- v1.conductors = [link.Link.make_link('self',
- api.request.public_url,
- 'conductors', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'conductors', '',
- bookmark=True)
+ v1.conductors = [link.make_link('self',
+ api.request.public_url,
+ 'conductors', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'conductors', '',
+ bookmark=True)
]
if utils.allow_allocations():
- v1.allocations = [link.Link.make_link('self',
- api.request.public_url,
- 'allocations', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'allocations', '',
- bookmark=True)
- ]
- if utils.allow_expose_events():
- v1.events = [link.Link.make_link('self', api.request.public_url,
- 'events', ''),
- link.Link.make_link('bookmark',
+ v1.allocations = [link.make_link('self',
api.request.public_url,
- 'events', '',
+ 'allocations', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'allocations', '',
bookmark=True)
+ ]
+ if utils.allow_expose_events():
+ v1.events = [link.make_link('self', api.request.public_url,
+ 'events', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'events', '',
+ bookmark=True)
]
if utils.allow_deploy_templates():
v1.deploy_templates = [
- link.Link.make_link('self',
- api.request.public_url,
- 'deploy_templates', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'deploy_templates', '',
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'deploy_templates', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'deploy_templates', '',
+ bookmark=True)
]
v1.version = version.default_version()
return v1
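Throughout these hunks the WSME `link.Link` complex type is replaced by the
plain `link.make_link()` function, which returns a dict instead of a typed
object. A minimal sketch of such a helper, assuming the dict shape implied by
the `['href']` indexing in collection.py below (the real builder lives in
ironic/api/controllers/link.py):

    # Hypothetical sketch; the actual implementation is in
    # ironic/api/controllers/link.py.
    def make_link(rel_name, url, resource, resource_args, bookmark=False):
        # Bookmark links omit the API version prefix from the URL.
        template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
        href = template % {'url': url, 'res': resource}
        if resource_args:
            href = '%s/%s' % (href, resource_args)
        return {'href': href, 'rel': rel_name}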
diff --git a/ironic/api/controllers/v1/allocation.py b/ironic/api/controllers/v1/allocation.py
index 92ed7c6a0..9cf18d0b6 100644
--- a/ironic/api/controllers/v1/allocation.py
+++ b/ironic/api/controllers/v1/allocation.py
@@ -65,7 +65,7 @@ class Allocation(base.APIBase):
name = atypes.wsattr(str)
"""The logical name for this allocation"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated allocation links"""
state = atypes.wsattr(str, readonly=True)
@@ -107,9 +107,9 @@ class Allocation(base.APIBase):
# This field is only used in POST, never return it.
allocation.node = atypes.Unset
allocation.links = [
- link.Link.make_link('self', url, 'allocations', allocation.uuid),
- link.Link.make_link('bookmark', url, 'allocations',
- allocation.uuid, bookmark=True)
+ link.make_link('self', url, 'allocations', allocation.uuid),
+ link.make_link('bookmark', url, 'allocations',
+ allocation.uuid, bookmark=True)
]
return allocation
diff --git a/ironic/api/controllers/v1/bios.py b/ironic/api/controllers/v1/bios.py
index 3a21c5627..db45e3ce3 100644
--- a/ironic/api/controllers/v1/bios.py
+++ b/ironic/api/controllers/v1/bios.py
@@ -37,7 +37,7 @@ class BIOSSetting(base.APIBase):
value = atypes.wsattr(str)
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
def __init__(self, **kwargs):
self.fields = []
@@ -52,11 +52,11 @@ class BIOSSetting(base.APIBase):
def _convert_with_links(bios, node_uuid, url):
"""Add links to the bios setting."""
name = bios.name
- bios.links = [link.Link.make_link('self', url, 'nodes',
- "%s/bios/%s" % (node_uuid, name)),
- link.Link.make_link('bookmark', url, 'nodes',
- "%s/bios/%s" % (node_uuid, name),
- bookmark=True)]
+ bios.links = [link.make_link('self', url, 'nodes',
+ "%s/bios/%s" % (node_uuid, name)),
+ link.make_link('bookmark', url, 'nodes',
+ "%s/bios/%s" % (node_uuid, name),
+ bookmark=True)]
return bios
@classmethod
diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py
index d72cbb383..ba6db6aad 100644
--- a/ironic/api/controllers/v1/chassis.py
+++ b/ironic/api/controllers/v1/chassis.py
@@ -58,10 +58,10 @@ class Chassis(base.APIBase):
extra = {str: types.jsontype}
"""The metadata of the chassis"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated chassis links"""
- nodes = atypes.wsattr([link.Link], readonly=True)
+ nodes = None
"""Links to the collection of nodes contained in this chassis"""
def __init__(self, **kwargs):
@@ -76,23 +76,23 @@ class Chassis(base.APIBase):
@staticmethod
def _convert_with_links(chassis, url, fields=None):
if fields is None:
- chassis.nodes = [link.Link.make_link('self',
- url,
- 'chassis',
- chassis.uuid + "/nodes"),
- link.Link.make_link('bookmark',
- url,
- 'chassis',
- chassis.uuid + "/nodes",
- bookmark=True)
+ chassis.nodes = [link.make_link('self',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes"),
+ link.make_link('bookmark',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes",
+ bookmark=True)
]
- chassis.links = [link.Link.make_link('self',
- url,
- 'chassis', chassis.uuid),
- link.Link.make_link('bookmark',
- url,
- 'chassis', chassis.uuid,
- bookmark=True)
+ chassis.links = [link.make_link('self',
+ url,
+ 'chassis', chassis.uuid),
+ link.make_link('bookmark',
+ url,
+ 'chassis', chassis.uuid,
+ bookmark=True)
]
return chassis
diff --git a/ironic/api/controllers/v1/collection.py b/ironic/api/controllers/v1/collection.py
index 8fc44d62c..6e1b1faf3 100644
--- a/ironic/api/controllers/v1/collection.py
+++ b/ironic/api/controllers/v1/collection.py
@@ -52,5 +52,5 @@ class Collection(base.Base):
'args': q_args, 'limit': limit,
'marker': getattr(self.collection[-1], self.get_key_field())}
- return link.Link.make_link('next', api.request.public_url,
- resource_url, next_args).href
+ return link.make_link('next', api.request.public_url,
+ resource_url, next_args)['href']
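Since the helper now returns a dict, the next-link URL is obtained by
indexing rather than attribute access; with illustrative arguments:

    next_url = make_link('next', 'http://127.0.0.1:6385', 'nodes',
                         'limit=2&marker=abc')['href']
    # previously: link.Link.make_link(...).href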
diff --git a/ironic/api/controllers/v1/conductor.py b/ironic/api/controllers/v1/conductor.py
index 8ab1922ae..096c3c587 100644
--- a/ironic/api/controllers/v1/conductor.py
+++ b/ironic/api/controllers/v1/conductor.py
@@ -53,7 +53,7 @@ class Conductor(base.APIBase):
drivers = atypes.wsattr([str])
"""The drivers enabled on this conductor"""
- links = atypes.wsattr([link.Link])
+ links = None
"""A list containing a self link and associated conductor links"""
def __init__(self, **kwargs):
@@ -72,11 +72,11 @@ class Conductor(base.APIBase):
@staticmethod
def _convert_with_links(conductor, url, fields=None):
- conductor.links = [link.Link.make_link('self', url, 'conductors',
- conductor.hostname),
- link.Link.make_link('bookmark', url, 'conductors',
- conductor.hostname,
- bookmark=True)]
+ conductor.links = [link.make_link('self', url, 'conductors',
+ conductor.hostname),
+ link.make_link('bookmark', url, 'conductors',
+ conductor.hostname,
+ bookmark=True)]
return conductor
@classmethod
diff --git a/ironic/api/controllers/v1/deploy_template.py b/ironic/api/controllers/v1/deploy_template.py
index 6989693e7..90555bad2 100644
--- a/ironic/api/controllers/v1/deploy_template.py
+++ b/ironic/api/controllers/v1/deploy_template.py
@@ -82,7 +82,7 @@ class DeployTemplate(base.APIBase):
steps = atypes.wsattr([DeployStepType], mandatory=True)
"""The deploy steps of this deploy template."""
- links = atypes.wsattr([link.Link])
+ links = None
"""A list containing a self link and associated deploy template links."""
extra = {str: types.jsontype}
@@ -148,11 +148,11 @@ class DeployTemplate(base.APIBase):
@staticmethod
def _convert_with_links(template, url, fields=None):
template.links = [
- link.Link.make_link('self', url, 'deploy_templates',
- template.uuid),
- link.Link.make_link('bookmark', url, 'deploy_templates',
- template.uuid,
- bookmark=True)
+ link.make_link('self', url, 'deploy_templates',
+ template.uuid),
+ link.make_link('bookmark', url, 'deploy_templates',
+ template.uuid,
+ bookmark=True)
]
return template
diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py
index ef63074b3..2e87a20ee 100644
--- a/ironic/api/controllers/v1/driver.py
+++ b/ironic/api/controllers/v1/driver.py
@@ -96,10 +96,10 @@ class Driver(base.Base):
type = str
"""Whether the driver is classic or dynamic (hardware type)"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing self and bookmark links"""
- properties = atypes.wsattr([link.Link], readonly=True)
+ properties = None
"""A list containing links to driver properties"""
"""Default interface for a hardware type"""
@@ -146,23 +146,23 @@ class Driver(base.Base):
driver.name = name
driver.hosts = hosts
driver.links = [
- link.Link.make_link('self',
- api.request.public_url,
- 'drivers', name),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', name,
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'drivers', name),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', name,
+ bookmark=True)
]
if api_utils.allow_links_node_states_and_driver_properties():
driver.properties = [
- link.Link.make_link('self',
- api.request.public_url,
- 'drivers', name + "/properties"),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', name + "/properties",
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'drivers', name + "/properties"),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', name + "/properties",
+ bookmark=True)
]
if api_utils.allow_dynamic_drivers():
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index 21d429981..cba7a4e5c 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -343,7 +343,7 @@ class Indicator(base.APIBase):
states = atypes.ArrayType(str)
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
def __init__(self, **kwargs):
self.name = kwargs.get('name')
@@ -355,11 +355,11 @@ class Indicator(base.APIBase):
def _convert_with_links(node_uuid, indicator, url):
"""Add links to the indicator."""
indicator.links = [
- link.Link.make_link(
+ link.make_link(
'self', url, 'nodes',
'%s/management/indicators/%s' % (
node_uuid, indicator.name)),
- link.Link.make_link(
+ link.make_link(
'bookmark', url, 'nodes',
'%s/management/indicators/%s' % (
node_uuid, indicator.name),
@@ -1206,19 +1206,19 @@ class Node(base.APIBase):
_set_chassis_uuid)
"""The UUID of the chassis this node belongs"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated node links"""
- ports = atypes.wsattr([link.Link], readonly=True)
+ ports = None
"""Links to the collection of ports on this node"""
- portgroups = atypes.wsattr([link.Link], readonly=True)
+ portgroups = None
"""Links to the collection of portgroups on this node"""
- volume = atypes.wsattr([link.Link], readonly=True)
+ volume = None
"""Links to endpoint for retrieving volume resources on this node"""
- states = atypes.wsattr([link.Link], readonly=True)
+ states = None
"""Links to endpoint for retrieving and setting node states"""
boot_interface = atypes.wsattr(str)
@@ -1336,38 +1336,38 @@ class Node(base.APIBase):
def _convert_with_links(node, url, fields=None, show_states_links=True,
show_portgroups=True, show_volume=True):
if fields is None:
- node.ports = [link.Link.make_link('self', url, 'nodes',
- node.uuid + "/ports"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/ports",
- bookmark=True)
+ node.ports = [link.make_link('self', url, 'nodes',
+ node.uuid + "/ports"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/ports",
+ bookmark=True)
]
if show_states_links:
- node.states = [link.Link.make_link('self', url, 'nodes',
- node.uuid + "/states"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/states",
- bookmark=True)]
+ node.states = [link.make_link('self', url, 'nodes',
+ node.uuid + "/states"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/states",
+ bookmark=True)]
if show_portgroups:
node.portgroups = [
- link.Link.make_link('self', url, 'nodes',
- node.uuid + "/portgroups"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/portgroups",
- bookmark=True)]
+ link.make_link('self', url, 'nodes',
+ node.uuid + "/portgroups"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/portgroups",
+ bookmark=True)]
if show_volume:
node.volume = [
- link.Link.make_link('self', url, 'nodes',
- node.uuid + "/volume"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/volume",
- bookmark=True)]
-
- node.links = [link.Link.make_link('self', url, 'nodes',
- node.uuid),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid, bookmark=True)
+ link.make_link('self', url, 'nodes',
+ node.uuid + "/volume"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/volume",
+ bookmark=True)]
+
+ node.links = [link.make_link('self', url, 'nodes',
+ node.uuid),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid, bookmark=True)
]
return node
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index 6422ae27f..9bdd57dac 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -155,7 +155,7 @@ class Port(base.APIBase):
physical_network = atypes.StringType(max_length=64)
"""The name of the physical network to which this port is connected."""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated port links"""
is_smartnic = types.boolean
@@ -199,11 +199,11 @@ class Port(base.APIBase):
url = api.request.public_url
- port.links = [link.Link.make_link('self', url,
- 'ports', port.uuid),
- link.Link.make_link('bookmark', url,
- 'ports', port.uuid,
- bookmark=True)
+ port.links = [link.make_link('self', url,
+ 'ports', port.uuid),
+ link.make_link('bookmark', url,
+ 'ports', port.uuid,
+ bookmark=True)
]
if not sanitize:
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index d6bd3eb48..fe877c67a 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -90,7 +90,7 @@ class Portgroup(base.APIBase):
name = atypes.wsattr(str)
"""The logical name for this portgroup"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated portgroup links"""
standalone_ports_supported = types.boolean
@@ -105,7 +105,7 @@ class Portgroup(base.APIBase):
properties = {str: types.jsontype}
"""This portgroup's properties"""
- ports = atypes.wsattr([link.Link], readonly=True)
+ ports = None
"""Links to the collection of ports of this portgroup"""
def __init__(self, **kwargs):
@@ -133,20 +133,20 @@ class Portgroup(base.APIBase):
"""Add links to the portgroup."""
if fields is None:
portgroup.ports = [
- link.Link.make_link('self', url, 'portgroups',
- portgroup.uuid + "/ports"),
- link.Link.make_link('bookmark', url, 'portgroups',
- portgroup.uuid + "/ports", bookmark=True)
+ link.make_link('self', url, 'portgroups',
+ portgroup.uuid + "/ports"),
+ link.make_link('bookmark', url, 'portgroups',
+ portgroup.uuid + "/ports", bookmark=True)
]
# never expose the node_id attribute
portgroup.node_id = atypes.Unset
- portgroup.links = [link.Link.make_link('self', url,
- 'portgroups', portgroup.uuid),
- link.Link.make_link('bookmark', url,
- 'portgroups', portgroup.uuid,
- bookmark=True)
+ portgroup.links = [link.make_link('self', url,
+ 'portgroups', portgroup.uuid),
+ link.make_link('bookmark', url,
+ 'portgroups', portgroup.uuid,
+ bookmark=True)
]
return portgroup
diff --git a/ironic/api/controllers/v1/state.py b/ironic/api/controllers/v1/state.py
index 3fa3f7a10..f6972ff82 100644
--- a/ironic/api/controllers/v1/state.py
+++ b/ironic/api/controllers/v1/state.py
@@ -14,7 +14,6 @@
# under the License.
from ironic.api.controllers import base
-from ironic.api.controllers import link
class State(base.APIBase):
@@ -28,5 +27,5 @@ class State(base.APIBase):
available = [str]
"""A list of available states it is able to transition to"""
- links = [link.Link]
+ links = None
"""A list containing a self link and associated state links"""
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 7add9faa5..742625e6d 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -15,6 +15,7 @@
from http import client as http_client
import inspect
+import io
import re
import jsonpatch
@@ -24,7 +25,6 @@ import os_traits
from oslo_config import cfg
from oslo_utils import uuidutils
from pecan import rest
-from webob import static
from ironic import api
from ironic.api.controllers.v1 import versions
@@ -418,21 +418,15 @@ def vendor_passthru(ident, method, topic, data=None, driver_passthru=False):
status_code = http_client.ACCEPTED if response['async'] else http_client.OK
return_value = response['return']
- response_params = {'status_code': status_code}
# Attach the return value to the response object
if response.get('attach'):
if isinstance(return_value, str):
# If unicode, convert to bytes
return_value = return_value.encode('utf-8')
- file_ = atypes.File(content=return_value)
- api.response.app_iter = static.FileIter(file_.file)
- # Since we've attached the return value to the response
- # object the response body should now be empty.
- return_value = None
- response_params['return_type'] = None
-
- return atypes.Response(return_value, **response_params)
+ return_value = io.BytesIO(return_value)
+
+ return atypes.PassthruResponse(return_value, status_code=status_code)
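A hedged sketch of the new attach path: instead of an `atypes.File` plus a
`Response` carrying a return_type, the binary payload is wrapped in a
file-like object (the payload below is made up):

    import io

    return_value = b'binary blob from the driver'  # hypothetical payload
    response = atypes.PassthruResponse(io.BytesIO(return_value),
                                       status_code=200)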
def check_for_invalid_fields(fields, object_fields):
diff --git a/ironic/api/controllers/v1/volume.py b/ironic/api/controllers/v1/volume.py
index 9678ed835..0797cd389 100644
--- a/ironic/api/controllers/v1/volume.py
+++ b/ironic/api/controllers/v1/volume.py
@@ -24,7 +24,6 @@ from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
-from ironic.api import types as atypes
from ironic.common import exception
from ironic.common import policy
@@ -36,13 +35,13 @@ class Volume(base.APIBase):
targets controllers.
"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume links"""
- connectors = atypes.wsattr([link.Link], readonly=True)
+ connectors = None
"""Links to the volume connectors resource"""
- targets = atypes.wsattr([link.Link], readonly=True)
+ targets = None
"""Links to the volume targets resource"""
@staticmethod
@@ -57,19 +56,19 @@ class Volume(base.APIBase):
args = ''
volume.links = [
- link.Link.make_link('self', url, resource, args),
- link.Link.make_link('bookmark', url, resource, args,
- bookmark=True)]
+ link.make_link('self', url, resource, args),
+ link.make_link('bookmark', url, resource, args,
+ bookmark=True)]
volume.connectors = [
- link.Link.make_link('self', url, resource, args + 'connectors'),
- link.Link.make_link('bookmark', url, resource, args + 'connectors',
- bookmark=True)]
+ link.make_link('self', url, resource, args + 'connectors'),
+ link.make_link('bookmark', url, resource, args + 'connectors',
+ bookmark=True)]
volume.targets = [
- link.Link.make_link('self', url, resource, args + 'targets'),
- link.Link.make_link('bookmark', url, resource, args + 'targets',
- bookmark=True)]
+ link.make_link('self', url, resource, args + 'targets'),
+ link.make_link('bookmark', url, resource, args + 'targets',
+ bookmark=True)]
return volume
diff --git a/ironic/api/controllers/v1/volume_connector.py b/ironic/api/controllers/v1/volume_connector.py
index 2e4f82b58..595798cd8 100644
--- a/ironic/api/controllers/v1/volume_connector.py
+++ b/ironic/api/controllers/v1/volume_connector.py
@@ -87,7 +87,7 @@ class VolumeConnector(base.APIBase):
_set_node_identifiers, mandatory=True)
"""The UUID of the node this volume connector belongs to"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume connector links"""
def __init__(self, **kwargs):
@@ -117,13 +117,13 @@ class VolumeConnector(base.APIBase):
@staticmethod
def _convert_with_links(connector, url):
- connector.links = [link.Link.make_link('self', url,
- 'volume/connectors',
- connector.uuid),
- link.Link.make_link('bookmark', url,
- 'volume/connectors',
- connector.uuid,
- bookmark=True)
+ connector.links = [link.make_link('self', url,
+ 'volume/connectors',
+ connector.uuid),
+ link.make_link('bookmark', url,
+ 'volume/connectors',
+ connector.uuid,
+ bookmark=True)
]
return connector
diff --git a/ironic/api/controllers/v1/volume_target.py b/ironic/api/controllers/v1/volume_target.py
index 764dd11a5..6667bcca5 100644
--- a/ironic/api/controllers/v1/volume_target.py
+++ b/ironic/api/controllers/v1/volume_target.py
@@ -94,7 +94,7 @@ class VolumeTarget(base.APIBase):
_set_node_identifiers, mandatory=True)
"""The UUID of the node this volume target belongs to"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume target links"""
def __init__(self, **kwargs):
@@ -124,13 +124,13 @@ class VolumeTarget(base.APIBase):
@staticmethod
def _convert_with_links(target, url):
- target.links = [link.Link.make_link('self', url,
- 'volume/targets',
- target.uuid),
- link.Link.make_link('bookmark', url,
- 'volume/targets',
- target.uuid,
- bookmark=True)
+ target.links = [link.make_link('self', url,
+ 'volume/targets',
+ target.uuid),
+ link.make_link('bookmark', url,
+ 'volume/targets',
+ target.uuid,
+ bookmark=True)
]
return target
diff --git a/ironic/api/controllers/version.py b/ironic/api/controllers/version.py
index a24ab32c6..b8b567f56 100644
--- a/ironic/api/controllers/version.py
+++ b/ironic/api/controllers/version.py
@@ -11,53 +11,40 @@
# under the License.
from ironic import api
-from ironic.api.controllers import base
from ironic.api.controllers import link
ID_VERSION1 = 'v1'
-class Version(base.Base):
- """An API version representation.
+def all_versions():
+ return [default_version()]
- This class represents an API version, including the minimum and
- maximum minor versions that are supported within the major version.
- """
-
- id = str
- """The ID of the (major) version, also acts as the release number"""
-
- links = [link.Link]
- """A Link that point to a specific version of the API"""
-
- status = str
- """Status of the version.
- One of:
- * CURRENT - the latest version of API,
- * SUPPORTED - supported, but not latest, version of API,
- * DEPRECATED - supported, but deprecated, version of API.
- """
+def default_version():
+ """Return a dict representing the current default version
- version = str
- """The current, maximum supported (major.minor) version of API."""
+ id: The ID of the (major) version, also acts as the release number
+ links: A list containing one link that points to the current version
+ of the API
- min_version = str
- """Minimum supported (major.minor) version of API."""
+ status: Status of the version, one of CURRENT, SUPPORTED, DEPRECATED
- def __init__(self, id, min_version, version, status='CURRENT'):
- self.id = id
- self.links = [link.Link.make_link('self', api.request.public_url,
- self.id, '', bookmark=True)]
- self.status = status
- self.version = version
- self.min_version = min_version
+ min_version: Minimum supported (major.minor) version of API.
+ version: The current, maximum supported (major.minor) version of API.
+ """
-def default_version():
# NOTE(dtantsur): avoid circular imports
from ironic.api.controllers.v1 import versions
- return Version(ID_VERSION1,
- versions.min_version_string(),
- versions.max_version_string())
+ return {
+ 'id': ID_VERSION1,
+ 'links': [
+ link.make_link('self',
+ api.request.public_url,
+ ID_VERSION1, '', bookmark=True)
+ ],
+ 'status': 'CURRENT',
+ 'min_version': versions.min_version_string(),
+ 'version': versions.max_version_string()
+ }
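With illustrative version numbers, the plain dict returned by
default_version() would look like:

    {
        'id': 'v1',
        'links': [{'href': 'http://127.0.0.1:6385/v1/', 'rel': 'self'}],
        'status': 'CURRENT',
        'min_version': '1.1',   # illustrative
        'version': '1.65',      # illustrative
    }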
diff --git a/ironic/api/expose.py b/ironic/api/expose.py
index 71bfa1500..16eecb1c2 100644
--- a/ironic/api/expose.py
+++ b/ironic/api/expose.py
@@ -25,8 +25,9 @@ import traceback
from oslo_config import cfg
from oslo_log import log
import pecan
-import wsme.rest.args
+from webob import static
+from ironic.api import args as api_args
from ironic.api import functions
from ironic.api import types as atypes
@@ -70,15 +71,15 @@ def expose(*args, **kwargs):
return_type = funcdef.return_type
try:
- args, kwargs = wsme.rest.args.get_args(
- funcdef, args, kwargs, pecan.request.params, None,
+ args, kwargs = api_args.get_args(
+ funcdef, args, kwargs, pecan.request.params,
pecan.request.body, pecan.request.content_type
)
result = f(self, *args, **kwargs)
# NOTE: Support setting of status_code with default 201
pecan.response.status = funcdef.status_code
- if isinstance(result, atypes.Response):
+ if isinstance(result, atypes.PassthruResponse):
pecan.response.status = result.status_code
# NOTE(lucasagomes): If the return code is 204
@@ -87,11 +88,14 @@ def expose(*args, **kwargs):
# content-length is 0
if result.status_code == 204:
return_type = None
- elif not isinstance(result.return_type,
- atypes.UnsetType):
- return_type = result.return_type
- result = result.obj
+ if callable(getattr(result.obj, 'read', None)):
+ # Stream the files-like data directly to the response
+ pecan.response.app_iter = static.FileIter(result.obj)
+ return_type = None
+ result = None
+ else:
+ result = result.obj
except Exception:
try:
diff --git a/ironic/api/method.py b/ironic/api/method.py
new file mode 100644
index 000000000..50f672a29
--- /dev/null
+++ b/ironic/api/method.py
@@ -0,0 +1,95 @@
+#
+# Copyright 2015 Rackspace, Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+from http import client as http_client
+import json
+import sys
+import traceback
+
+from oslo_config import cfg
+from oslo_log import log
+import pecan
+
+LOG = log.getLogger(__name__)
+
+
+pecan_json_decorate = pecan.expose(
+ content_type='application/json',
+ generic=False)
+
+
+def expose():
+
+ def decorate(f):
+
+ @functools.wraps(f)
+ def callfunction(self, *args, **kwargs):
+ try:
+ result = f(self, *args, **kwargs)
+
+ except Exception:
+ try:
+ exception_info = sys.exc_info()
+ orig_exception = exception_info[1]
+ orig_code = getattr(orig_exception, 'code', None)
+ result = format_exception(
+ exception_info,
+ cfg.CONF.debug_tracebacks_in_api
+ )
+ finally:
+ del exception_info
+
+ if orig_code and orig_code in http_client.responses:
+ pecan.response.status = orig_code
+ else:
+ pecan.response.status = 500
+
+ return json.dumps(result)
+
+ pecan_json_decorate(callfunction)
+ return callfunction
+
+ return decorate
+
+
+def format_exception(excinfo, debug=False):
+ """Extract informations that can be sent to the client."""
+ error = excinfo[1]
+ code = getattr(error, 'code', None)
+ if code and code in http_client.responses and (400 <= code < 500):
+ faultstring = (error.faultstring if hasattr(error, 'faultstring')
+ else str(error))
+ faultcode = getattr(error, 'faultcode', 'Client')
+ r = dict(faultcode=faultcode,
+ faultstring=faultstring)
+ LOG.debug("Client-side error: %s", r['faultstring'])
+ r['debuginfo'] = None
+ return r
+ else:
+ faultstring = str(error)
+ debuginfo = "\n".join(traceback.format_exception(*excinfo))
+
+ LOG.error('Server-side error: "%s". Detail: \n%s',
+ faultstring, debuginfo)
+
+ faultcode = getattr(error, 'faultcode', 'Server')
+ r = dict(faultcode=faultcode, faultstring=faultstring)
+ if debug:
+ r['debuginfo'] = debuginfo
+ else:
+ r['debuginfo'] = None
+ return r
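For a client-side (4xx) error, format_exception() produces a serializable
dict; a hypothetical 404 raised by a handler would be rendered roughly as:

    {
        'faultcode': 'Client',
        'faultstring': 'Node 1234abcd could not be found.',  # hypothetical
        'debuginfo': None,
    }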
diff --git a/ironic/api/types.py b/ironic/api/types.py
index 0da12360b..b022e50a3 100644
--- a/ironic/api/types.py
+++ b/ironic/api/types.py
@@ -1,55 +1,709 @@
# coding: utf-8
#
-# Copyright 2020 Red Hat, Inc.
-# All Rights Reserved.
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from wsme.types import ArrayType # noqa
-from wsme.types import Base # noqa
-from wsme.types import DictType # noqa
-from wsme.types import Enum # noqa
-from wsme.types import File # noqa
-from wsme.types import IntegerType # noqa
-from wsme.types import iscomplex # noqa
-from wsme.types import list_attributes # noqa
-from wsme.types import registry # noqa
-from wsme.types import StringType # noqa
-from wsme.types import text # noqa
-from wsme.types import Unset # noqa
-from wsme.types import UnsetType # noqa
-from wsme.types import UserType # noqa
-from wsme.types import wsattr # noqa
-from wsme.types import wsproperty # noqa
-
-
-class Response(object):
- """Object to hold the "response" from a view function"""
- def __init__(self, obj, status_code=None, error=None,
- return_type=Unset):
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import base64
+import datetime
+import decimal
+import inspect
+import re
+import weakref
+
+from oslo_log import log
+
+from ironic.common import exception
+
+
+LOG = log.getLogger(__name__)
+
+
+pod_types = (int, bytes, str, float, bool)
+native_types = pod_types + (datetime.datetime, decimal.Decimal)
+_promotable_types = (int, str, bytes)
+
+
+class ArrayType(object):
+ def __init__(self, item_type):
+ if iscomplex(item_type):
+ self._item_type = weakref.ref(item_type)
+ else:
+ self._item_type = item_type
+
+ def __hash__(self):
+ return hash(self.item_type)
+
+ def __eq__(self, other):
+ return isinstance(other, ArrayType) \
+ and self.item_type == other.item_type
+
+ def sample(self):
+ return [getattr(self.item_type, 'sample', self.item_type)()]
+
+ @property
+ def item_type(self):
+ if isinstance(self._item_type, weakref.ref):
+ return self._item_type()
+ else:
+ return self._item_type
+
+ def validate(self, value):
+ if value is None:
+ return
+ if not isinstance(value, list):
+ raise ValueError("Wrong type. Expected '[%s]', got '%s'" % (
+ self.item_type, type(value)
+ ))
+ return [
+ validate_value(self.item_type, item)
+ for item in value
+ ]
+
+
+class DictType(object):
+ def __init__(self, key_type, value_type):
+ if key_type not in (int, bytes, str, float, bool):
+ raise ValueError("Dictionaries key can only be a pod type")
+ self.key_type = key_type
+ if iscomplex(value_type):
+ self._value_type = weakref.ref(value_type)
+ else:
+ self._value_type = value_type
+
+ def __hash__(self):
+ return hash((self.key_type, self.value_type))
+
+ def sample(self):
+ key = getattr(self.key_type, 'sample', self.key_type)()
+ value = getattr(self.value_type, 'sample', self.value_type)()
+ return {key: value}
+
+ @property
+ def value_type(self):
+ if isinstance(self._value_type, weakref.ref):
+ return self._value_type()
+ else:
+ return self._value_type
+
+ def validate(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("Wrong type. Expected '{%s: %s}', got '%s'" % (
+ self.key_type, self.value_type, type(value)
+ ))
+ return dict((
+ (
+ validate_value(self.key_type, key),
+ validate_value(self.value_type, v)
+ ) for key, v in value.items()
+ ))
+
+
+class UserType(object):
+ basetype = None
+ name = None
+
+ def validate(self, value):
+ return value
+
+ def tobasetype(self, value):
+ return value
+
+ def frombasetype(self, value):
+ return value
+
+
+def isusertype(class_):
+ return isinstance(class_, UserType)
+
+
+class BinaryType(UserType):
+ """A user type that use base64 strings to carry binary data.
+
+ """
+ basetype = bytes
+ name = 'binary'
+
+ def tobasetype(self, value):
+ if value is None:
+ return None
+ return base64.encodebytes(value)
+
+ def frombasetype(self, value):
+ if value is None:
+ return None
+ return base64.decodebytes(value)
+
+
+#: The binary almost-native type
+binary = BinaryType()
+
+
+class IntegerType(UserType):
+ """A simple integer type. Can validate a value range.
+
+ :param minimum: Possible minimum value
+ :param maximum: Possible maximum value
+
+ Example::
+
+ Price = IntegerType(minimum=1)
+
+ """
+ basetype = int
+ name = "integer"
+
+ def __init__(self, minimum=None, maximum=None):
+ self.minimum = minimum
+ self.maximum = maximum
+
+ @staticmethod
+ def frombasetype(value):
+ return int(value) if value is not None else None
+
+ def validate(self, value):
+ if self.minimum is not None and value < self.minimum:
+ error = 'Value should be greater or equal to %s' % self.minimum
+ raise ValueError(error)
+
+ if self.maximum is not None and value > self.maximum:
+ error = 'Value should be lower or equal to %s' % self.maximum
+ raise ValueError(error)
+
+ return value
+
+
+class StringType(UserType):
+ """A simple string type. Can validate a length and a pattern.
+
+ :param min_length: Possible minimum length
+ :param max_length: Possible maximum length
+ :param pattern: Possible string pattern
+
+ Example::
+
+ Name = StringType(min_length=1, pattern='^[a-zA-Z ]*$')
+
+ """
+ basetype = str
+ name = "string"
+
+ def __init__(self, min_length=None, max_length=None, pattern=None):
+ self.min_length = min_length
+ self.max_length = max_length
+ if isinstance(pattern, str):
+ self.pattern = re.compile(pattern)
+ else:
+ self.pattern = pattern
+
+ def validate(self, value):
+ if not isinstance(value, self.basetype):
+ error = 'Value should be string'
+ raise ValueError(error)
+
+ if self.min_length is not None and len(value) < self.min_length:
+ error = 'Value should have a minimum character requirement of %s' \
+ % self.min_length
+ raise ValueError(error)
+
+ if self.max_length is not None and len(value) > self.max_length:
+ error = 'Value should have a maximum character requirement of %s' \
+ % self.max_length
+ raise ValueError(error)
+
+ if self.pattern is not None and not self.pattern.search(value):
+ error = 'Value should match the pattern %s' % self.pattern.pattern
+ raise ValueError(error)
+
+ return value
+
+
+class Enum(UserType):
+ """A simple enumeration type. Can be based on any non-complex type.
+
+ :param basetype: The actual data type
+ :param values: A set of possible values
+
+ If nullable, 'None' should be added to the values set.
+
+ Example::
+
+ Gender = Enum(str, 'male', 'female')
+ Specie = Enum(str, 'cat', 'dog')
+
+ """
+ def __init__(self, basetype, *values, **kw):
+ self.basetype = basetype
+ self.values = set(values)
+ name = kw.pop('name', None)
+ if name is None:
+ name = "Enum(%s)" % ', '.join((str(v) for v in values))
+ self.name = name
+
+ def validate(self, value):
+ if value not in self.values:
+ raise ValueError("Value should be one of: %s" %
+ ', '.join(map(str, self.values)))
+ return value
+
+ def tobasetype(self, value):
+ return value
+
+ def frombasetype(self, value):
+ return value
+
+
+class UnsetType(object):
+ def __bool__(self):
+ return False
+
+ def __repr__(self):
+ return 'Unset'
+
+
+Unset = UnsetType()
+
+
+def validate_value(datatype, value):
+ if value in (Unset, None) or datatype is None:
+ return value
+
+ # Try to promote the data type to one of our complex types.
+ if isinstance(datatype, list):
+ datatype = ArrayType(datatype[0])
+ elif isinstance(datatype, dict):
+ datatype = DictType(*list(datatype.items())[0])
+
+ # If the datatype has its own validator, use that.
+ if hasattr(datatype, 'validate'):
+ return datatype.validate(value)
+
+ # Do type promotion/conversion and data validation for builtin
+ # types.
+ v_type = type(value)
+ if datatype == int:
+ if v_type in _promotable_types:
+ try:
+ # Try to turn the value into an int
+ value = datatype(value)
+ except ValueError:
+ # An error is raised at the end of the function
+ # when the types don't match.
+ pass
+ elif datatype is float and v_type in _promotable_types:
+ try:
+ value = float(value)
+ except ValueError:
+ # An error is raised at the end of the function
+ # when the types don't match.
+ pass
+ elif datatype is str and isinstance(value, bytes):
+ value = value.decode()
+ elif datatype is bytes and isinstance(value, str):
+ value = value.encode()
+
+ if not isinstance(value, datatype):
+ raise ValueError(
+ "Wrong type. Expected '%s', got '%s'" % (
+ datatype, v_type
+ ))
+ return value
+
+
+def iscomplex(datatype):
+ return inspect.isclass(datatype) \
+ and '_wsme_attributes' in datatype.__dict__
+
+
+class wsproperty(property):
+ """A specialised :class:`property` to define typed-property on complex types.
+
+ Example::
+
+ class MyComplexType(Base):
+ def get_aint(self):
+ return self._aint
+
+ def set_aint(self, value):
+ assert value < 10 # Dummy input validation
+ self._aint = value
+
+ aint = wsproperty(int, get_aint, set_aint, mandatory=True)
+
+ """
+ def __init__(self, datatype, fget, fset=None,
+ mandatory=False, doc=None, name=None):
+ property.__init__(self, fget, fset)
+ #: The property name in the parent python class
+ self.key = None
+ #: The attribute name on the public of the api.
+ #: Defaults to :attr:`key`
+ self.name = name
+ #: property data type
+ self.datatype = datatype
+ #: True if the property is mandatory
+ self.mandatory = mandatory
+
+
+class wsattr(object):
+ """Complex type attribute definition.
+
+ Example::
+
+ class MyComplexType(ctypes.Base):
+ optionalvalue = int
+ mandatoryvalue = wsattr(int, mandatory=True)
+ named_value = wsattr(int, name='named.value')
+
+ After inspection, the non-wsattr attributes will be replaced, and
+ the above class will be equivalent to::
+
+ class MyComplexType(ctypes.Base):
+ optionalvalue = wsattr(int)
+ mandatoryvalue = wsattr(int, mandatory=True)
+
+ """
+ def __init__(self, datatype, mandatory=False, name=None, default=Unset,
+ readonly=False):
+ #: The attribute name in the parent python class.
+ #: Set by :func:`inspect_class`
+ self.key = None # will be set by class inspection
+ #: The attribute name on the public of the api.
+ #: Defaults to :attr:`key`
+ self.name = name
+ self._datatype = (datatype,)
+ #: True if the attribute is mandatory
+ self.mandatory = mandatory
+ #: Default value. The attribute will return this instead
+ #: of :data:`Unset` if no value has been set.
+ self.default = default
+ #: If True value cannot be set from json/xml input data
+ self.readonly = readonly
+
+ self.complextype = None
+
+ def _get_dataholder(self, instance):
+ dataholder = getattr(instance, '_wsme_dataholder', None)
+ if dataholder is None:
+ dataholder = instance._wsme_DataHolderClass()
+ instance._wsme_dataholder = dataholder
+ return dataholder
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ return self
+ return getattr(
+ self._get_dataholder(instance),
+ self.key,
+ self.default
+ )
+
+ def __set__(self, instance, value):
+ try:
+ value = validate_value(self.datatype, value)
+ except (ValueError, TypeError) as e:
+ raise exception.InvalidInput(self.name, value, str(e))
+ dataholder = self._get_dataholder(instance)
+ if value is Unset:
+ if hasattr(dataholder, self.key):
+ delattr(dataholder, self.key)
+ else:
+ setattr(dataholder, self.key, value)
+
+ def __delete__(self, instance):
+ self.__set__(instance, Unset)
+
+ def _get_datatype(self):
+ if isinstance(self._datatype, tuple):
+ self._datatype = \
+ self.complextype().__registry__.resolve_type(self._datatype[0])
+ if isinstance(self._datatype, weakref.ref):
+ return self._datatype()
+ if isinstance(self._datatype, list):
+ return [
+ item() if isinstance(item, weakref.ref) else item
+ for item in self._datatype
+ ]
+ return self._datatype
+
+ def _set_datatype(self, datatype):
+ self._datatype = datatype
+
+ #: attribute data type. Can be either an actual type,
+ #: or a type name, in which case the actual type will be
+ #: determined when needed (generally just before scanning the api).
+ datatype = property(_get_datatype, _set_datatype)
+
+
+def iswsattr(attr):
+ if inspect.isfunction(attr) or inspect.ismethod(attr):
+ return False
+ if isinstance(attr, property) and not isinstance(attr, wsproperty):
+ return False
+ return True
+
+
+def sort_attributes(class_, attributes):
+ """Sort a class attributes list.
+
+ Three mechanisms are attempted:
+
+ #. Look for a _wsme_attr_order attribute on the class. This allows
+ defining an arbitrary order of the attributes (useful for
+ generated types).
+
+ #. Inspect the class source code to find the declaration order.
+
+ #. Sort alphabetically.
+
+ """
+
+ if not len(attributes):
+ return
+
+ attrs = dict((a.key, a) for a in attributes)
+
+ if hasattr(class_, '_wsme_attr_order'):
+ names_order = class_._wsme_attr_order
+ else:
+ names = attrs.keys()
+ names_order = []
+ try:
+ lines = []
+ for cls in inspect.getmro(class_):
+ if cls is object:
+ continue
+ lines[len(lines):] = inspect.getsourcelines(cls)[0]
+ for line in lines:
+ line = line.strip().replace(" ", "")
+ if '=' in line:
+ aname = line[:line.index('=')]
+ if aname in names and aname not in names_order:
+ names_order.append(aname)
+ if len(names_order) < len(names):
+ names_order.extend((
+ name for name in names if name not in names_order))
+ assert len(names_order) == len(names)
+ except (TypeError, IOError):
+ names_order = list(names)
+ names_order.sort()
+
+ attributes[:] = [attrs[name] for name in names_order]
+
+
+def inspect_class(class_):
+ """Extract a list of (name, wsattr|wsproperty) for the given class"""
+ attributes = []
+ for name, attr in inspect.getmembers(class_, iswsattr):
+ if name.startswith('_'):
+ continue
+ if inspect.isroutine(attr):
+ continue
+
+ if isinstance(attr, (wsattr, wsproperty)):
+ attrdef = attr
+ else:
+ if (attr not in native_types
+ and (inspect.isclass(attr) or isinstance(attr, (list, dict)))):
+ register_type(attr)
+ attrdef = getattr(class_, '__wsattrclass__', wsattr)(attr)
+
+ attrdef.key = name
+ if attrdef.name is None:
+ attrdef.name = name
+ attrdef.complextype = weakref.ref(class_)
+ attributes.append(attrdef)
+ setattr(class_, name, attrdef)
+
+ sort_attributes(class_, attributes)
+ return attributes
+
+
+def list_attributes(class_):
+ """Returns a list of a complex type attributes."""
+ if not iscomplex(class_):
+ raise TypeError("%s is not a registered type" % class_)
+ return class_._wsme_attributes
+
+
+def make_dataholder(class_):
+ # the slots are computed outside the class scope to avoid
+ # 'attr' to pullute the class namespace, which leads to weird
+ # things if one of the slots is named 'attr'.
+ slots = [attr.key for attr in class_._wsme_attributes]
+
+ class DataHolder(object):
+ __slots__ = slots
+
+ DataHolder.__name__ = class_.__name__ + 'DataHolder'
+ return DataHolder
+
+
+class Registry(object):
+ def __init__(self):
+ self._complex_types = []
+ self.array_types = set()
+ self.dict_types = set()
+
+ @property
+ def complex_types(self):
+ return [t() for t in self._complex_types if t()]
+
+ def register(self, class_):
+ """Make sure a type is registered.
+
+ It is automatically called by :class:`expose() <expose.expose>`
+ and :class:`validate() <expose.validate>`.
+ Unless you want to control when the class inspection is done there
+ is no need to call it.
+
+ """
+ if class_ is None or \
+ class_ in native_types or \
+ isinstance(class_, UserType) or iscomplex(class_) or \
+ isinstance(class_, ArrayType) or isinstance(class_, DictType):
+ return class_
+
+ if isinstance(class_, list):
+ if len(class_) != 1:
+ raise ValueError("Cannot register type %s" % repr(class_))
+ dt = ArrayType(class_[0])
+ self.register(dt.item_type)
+ self.array_types.add(dt)
+ return dt
+
+ if isinstance(class_, dict):
+ if len(class_) != 1:
+ raise ValueError("Cannot register type %s" % repr(class_))
+ dt = DictType(*list(class_.items())[0])
+ self.register(dt.value_type)
+ self.dict_types.add(dt)
+ return dt
+
+ class_._wsme_attributes = None
+ class_._wsme_attributes = inspect_class(class_)
+ class_._wsme_DataHolderClass = make_dataholder(class_)
+
+ class_.__registry__ = self
+ self._complex_types.append(weakref.ref(class_))
+ return class_
+
+ def reregister(self, class_):
+ """Register a type which may already have been registered.
+
+ """
+ self._unregister(class_)
+ return self.register(class_)
+
+ def _unregister(self, class_):
+ """Remove a previously registered type.
+
+ """
+ # Clear the existing attribute reference so it is rebuilt if
+ # the class is registered again later.
+ if hasattr(class_, '_wsme_attributes'):
+ del class_._wsme_attributes
+ # FIXME(dhellmann): This method does not recurse through the
+ # types like register() does. Should it?
+ if isinstance(class_, list):
+ at = ArrayType(class_[0])
+ try:
+ self.array_types.remove(at)
+ except KeyError:
+ pass
+ elif isinstance(class_, dict):
+ key_type, value_type = list(class_.items())[0]
+ self.dict_types = set(
+ dt for dt in self.dict_types
+ if (dt.key_type, dt.value_type) != (key_type, value_type)
+ )
+ # We can't use remove() here because the items in
+ # _complex_types are weakref objects pointing to the classes,
+ # so we can't compare with them directly.
+ self._complex_types = [
+ ct for ct in self._complex_types
+ if ct() is not class_
+ ]
+
+ def lookup(self, typename):
+ LOG.debug('Lookup %s', typename)
+ modname = None
+ if '.' in typename:
+ modname, typename = typename.rsplit('.', 1)
+ for ct in self._complex_types:
+ ct = ct()
+ if ct is not None and typename == ct.__name__ and (
+ modname is None or modname == ct.__module__):
+ return ct
+
+ def resolve_type(self, type_):
+ if isinstance(type_, str):
+ return self.lookup(type_)
+ if isinstance(type_, list):
+ type_ = ArrayType(type_[0])
+ if isinstance(type_, dict):
+ type_ = DictType(list(type_.keys())[0], list(type_.values())[0])
+ if isinstance(type_, ArrayType):
+ type_ = ArrayType(self.resolve_type(type_.item_type))
+ self.array_types.add(type_)
+ elif isinstance(type_, DictType):
+ type_ = DictType(
+ type_.key_type,
+ self.resolve_type(type_.value_type)
+ )
+ self.dict_types.add(type_)
+ else:
+ type_ = self.register(type_)
+ return type_
+
+
+# Default type registry
+registry = Registry()
+
+
+def register_type(class_):
+ return registry.register(class_)
+
+
+class BaseMeta(type):
+ def __new__(cls, name, bases, dct):
+ if bases and bases[0] is not object and '__registry__' not in dct:
+ dct['__registry__'] = registry
+ return type.__new__(cls, name, bases, dct)
+
+ def __init__(cls, name, bases, dct):
+ if bases and bases[0] is not object and cls.__registry__:
+ cls.__registry__.register(cls)
+
+
+class Base(metaclass=BaseMeta):
+ """Base type for complex types"""
+ def __init__(self, **kw):
+ for key, value in kw.items():
+ if hasattr(self, key):
+ setattr(self, key, value)
+
+
+class PassthruResponse(object):
+ """Object to hold the "response" from a passthru call"""
+ def __init__(self, obj, status_code=None):
#: Store the result object from the view
self.obj = obj
#: Store an optional status_code
self.status_code = status_code
-
- #: Return error details
- #: Must be a dictionnary with the following keys: faultcode,
- #: faultstring and an optional debuginfo
- self.error = error
-
- #: Return type
- #: Type of the value returned by the function
- #: If the return type is wsme.types.Unset it will be ignored
- #: and the default return type will prevail.
- self.return_type = return_type
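A minimal sketch of the vendored type machinery in use, with a made-up type;
registration happens automatically through the BaseMeta metaclass:

    class Sample(Base):
        # Plain attributes are promoted to wsattr by inspect_class().
        name = wsattr(str, mandatory=True)
        tags = [str]

    s = Sample(name='node-1', tags=['a', 'b'])
    s.name = b'node-2'   # bytes promoted to str by validate_value()
    s.tags = 'oops'      # raises exception.InvalidInput: not a list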
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index a314e75e0..1ade17253 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -20,7 +20,6 @@ from http import client as http_client
from ironic_lib.exception import IronicException
from oslo_log import log as logging
-import wsme
from ironic.common.i18n import _
@@ -713,8 +712,21 @@ class IBMCConnectionError(IBMCError):
_msg_fmt = _("IBMC connection failed for node %(node)s: %(error)s")
-class ClientSideError(wsme.exc.ClientSideError):
- pass
+class ClientSideError(RuntimeError):
+ def __init__(self, msg=None, status_code=400, faultcode='Client'):
+ self.msg = msg
+ self.code = status_code
+ self.faultcode = faultcode
+ super(ClientSideError, self).__init__(self.faultstring)
+
+ @property
+ def faultstring(self):
+ if self.msg is None:
+ return str(self)
+ elif isinstance(self.msg, str):
+ return self.msg
+ else:
+ return str(self.msg)
class NodeIsRetired(Invalid):
@@ -725,3 +737,75 @@ class NodeIsRetired(Invalid):
class NoFreeIPMITerminalPorts(TemporaryFailure):
_msg_fmt = _("Unable to allocate a free port on host %(host)s for IPMI "
"terminal, not enough free ports.")
+
+
+class InvalidInput(ClientSideError):
+ def __init__(self, fieldname, value, msg=''):
+ self.fieldname = fieldname
+ self.value = value
+ super(InvalidInput, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _(
+ "Invalid input for field/attribute %(fieldname)s. "
+ "Value: '%(value)s'. %(msg)s"
+ ) % {
+ 'fieldname': self.fieldname,
+ 'value': self.value,
+ 'msg': self.msg
+ }
+
+
+class UnknownArgument(ClientSideError):
+ def __init__(self, argname, msg=''):
+ self.argname = argname
+ super(UnknownArgument, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _('Unknown argument: "%(argname)s"%(msg)s') % {
+ 'argname': self.argname,
+ 'msg': self.msg and ": " + self.msg or ""
+ }
+
+
+class MissingArgument(ClientSideError):
+ def __init__(self, argname, msg=''):
+ self.argname = argname
+ super(MissingArgument, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _('Missing argument: "%(argname)s"%(msg)s') % {
+ 'argname': self.argname,
+ 'msg': self.msg and ": " + self.msg or ""
+ }
+
+
+class UnknownAttribute(ClientSideError):
+ def __init__(self, fieldname, attributes, msg=''):
+ self.fieldname = fieldname
+ self.attributes = attributes
+ self.msg = msg
+ super(UnknownAttribute, self).__init__(self.msg)
+
+ @property
+ def faultstring(self):
+ error = _("Unknown attribute for argument %(argn)s: %(attrs)s")
+ if len(self.attributes) > 1:
+ error = _("Unknown attributes for argument %(argn)s: %(attrs)s")
+ str_attrs = ", ".join(self.attributes)
+ return error % {'argn': self.fieldname, 'attrs': str_attrs}
+
+ def add_fieldname(self, name):
+ """Add a fieldname to concatenate the full name.
+
+ Add a fieldname so that the whole hierarchy is displayed. Successive
+ calls to this method will prepend ``name`` to the hierarchy of names.
+ """
+ if self.fieldname is not None:
+ self.fieldname = "{}.{}".format(name, self.fieldname)
+ else:
+ self.fieldname = name
+ super(UnknownAttribute, self).__init__(self.msg)
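Illustration of the resulting fault payload (hypothetical values):

    err = InvalidInput('extra', 42, 'expected a dict')
    # err.code == 400, err.faultcode == 'Client'
    # err.faultstring == ("Invalid input for field/attribute extra. "
    #                     "Value: '42'. expected a dict")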
diff --git a/ironic/common/images.py b/ironic/common/images.py
index f1dc7ad15..31332df76 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -526,7 +526,7 @@ def get_temp_url_for_glance_image(context, image_uuid):
def create_boot_iso(context, output_filename, kernel_href,
ramdisk_href, deploy_iso_href=None, esp_image_href=None,
root_uuid=None, kernel_params=None, boot_mode=None,
- configdrive_href=None):
+ configdrive_href=None, base_iso=None):
"""Creates a bootable ISO image for a node.
Given the hrefs for kernel, ramdisk, root partition's UUID and
@@ -553,14 +553,26 @@ def create_boot_iso(context, output_filename, kernel_href,
:param configdrive_href: URL to ISO9660 or FAT-formatted OpenStack config
drive image. This image will be embedded into the built ISO image.
Optional.
+ :param base_iso: URL or glance UUID of an existing ISO image to use
+ directly, instead of building a bootable ISO from a kernel and
+ ramdisk. Optional.
:raises: ImageCreationFailed, if creating boot ISO failed.
"""
with utils.tempdir() as tmpdir:
- kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
- ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
-
- fetch(context, kernel_href, kernel_path)
- fetch(context, ramdisk_href, ramdisk_path)
+ if base_iso:
+ # NOTE(TheJulia): Eventually we want to use the creation method
+ # to perform the massaging of the image, because oddly enough
+ # we need to do all the same basic things, just a little
+ # differently.
+ fetch(context, base_iso, output_filename)
+ # Temporary: return to the caller until we support the
+ # combined operation.
+ return
+ else:
+ kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
+ ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
+ fetch(context, kernel_href, kernel_path)
+ fetch(context, ramdisk_href, ramdisk_path)
if configdrive_href:
configdrive_path = os.path.join(
@@ -592,7 +604,11 @@ def create_boot_iso(context, output_filename, kernel_href,
elif CONF.esp_image:
esp_image_path = CONF.esp_image
-
+ # TODO(TheJulia): we should opportunistically try to make the image
+ # both BIOS and UEFI bootable. In other words, collapse a lot of
+ # this path since the two are not mutually exclusive:
+ # UEFI boot mode with network iPXE -> ISO still requires
+ # BIOS-bootable contents.
create_esp_image_for_uefi(
output_filename, kernel_path, ramdisk_path,
deploy_iso=deploy_iso_path, esp_image=esp_image_path,
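A hedged sketch of the new short-circuit: when base_iso is supplied,
create_boot_iso() just fetches that image and returns, so the kernel and
ramdisk hrefs can be omitted (the URL below is illustrative):

    images.create_boot_iso(context, '/tmp/boot.iso',
                           kernel_href=None, ramdisk_href=None,
                           base_iso='http://example.com/images/boot.iso')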
diff --git a/ironic/common/json_rpc/server.py b/ironic/common/json_rpc/server.py
index 0f9db5fc9..2fdab0c4f 100644
--- a/ironic/common/json_rpc/server.py
+++ b/ironic/common/json_rpc/server.py
@@ -39,7 +39,7 @@ from ironic.common import json_rpc
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-_BLACK_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'}
+_DENY_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'}
def _build_method_map(manager):
@@ -50,7 +50,7 @@ def _build_method_map(manager):
"""
result = {}
for method in dir(manager):
- if method.startswith('_') or method in _BLACK_LIST:
+ if method.startswith('_') or method in _DENY_LIST:
continue
func = getattr(manager, method)
if not callable(func):
diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py
index 7182cd357..15a4201b5 100644
--- a/ironic/common/neutron.py
+++ b/ironic/common/neutron.py
@@ -11,12 +11,12 @@
# under the License.
import copy
+import ipaddress
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.v2_0 import client as clientv20
from oslo_log import log
-from oslo_utils import netutils
from oslo_utils import uuidutils
import retrying
@@ -247,7 +247,8 @@ def _add_ip_addresses_for_ipv6_stateful(context, port, client):
"""
fixed_ips = port['port']['fixed_ips']
if (not fixed_ips
- or not netutils.is_valid_ipv6(fixed_ips[0]['ip_address'])):
+ or ipaddress.ip_address(
+ fixed_ips[0]['ip_address']).version != 6):
return
subnet = client.show_subnet(
@@ -474,6 +475,160 @@ def remove_neutron_ports(task, params):
{'node_uuid': node_uuid})
+def _uncidr(cidr, ipv6=False):
+ """Convert CIDR network representation into network/netmask form
+
+ :param cidr: network in CIDR form
+ :param ipv6: if `True`, treat `cidr` as IPv6 (currently unused; the
+ `ipaddress` module handles both address families)
+ :returns: a tuple of network/host number in dotted
+ decimal notation, netmask in dotted decimal notation
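+
+ Example (illustrative)::
+
+ _uncidr('10.0.0.0/24') == ('10.0.0.0', '255.255.255.0')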
+
+ """
+ net = ipaddress.ip_interface(cidr).network
+ return str(net.network_address), str(net.netmask)
+
+
+def get_neutron_port_data(port_id, vif_id, client=None, context=None):
+ """Gather Neutron port and network configuration
+
+ Query Neutron for port and network configuration, return whatever
+ is available.
+
+ :param port_id: ironic port/portgroup ID.
+ :param vif_id: Neutron port ID.
+ :param client: Optional a Neutron client object.
+ :param context: request context
+ :type context: ironic.common.context.RequestContext
+ :raises: NetworkError
+ :returns: a dict holding network configuration information
+ associated with this ironic or Neutron port.
+ """
+
+ if not client:
+ client = get_client(context=context)
+
+ try:
+ port_config = client.show_port(
+ vif_id, fields=['id', 'name', 'dns_assignment', 'fixed_ips',
+ 'mac_address', 'network_id'])
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get port info for %(port_id)s. Error: '
+ '%(err)s') % {'port_id': vif_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ LOG.debug('Received port %(port)s data: %(info)s',
+ {'port': vif_id, 'info': port_config})
+
+ port_config = port_config['port']
+
+ port_id = port_config['name'] or port_id
+
+ network_id = port_config.get('network_id')
+
+ try:
+ network_config = client.show_network(
+ network_id, fields=['id', 'mtu', 'subnets'])
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get network info for %(network_id)s. Error: '
+ '%(err)s') % {'network_id': network_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ LOG.debug('Received network %(network)s data: %(info)s',
+ {'network': network_id, 'info': network_config})
+
+ network_config = network_config['network']
+
+ subnets_config = {}
+
+ network_data = {
+ 'links': [
+ {
+ 'id': port_id,
+ 'type': 'vif',
+ 'ethernet_mac_address': port_config['mac_address'],
+ 'vif_id': port_config['id'],
+ 'mtu': network_config['mtu']
+ }
+ ],
+        'networks': []
+ }
+
+ for fixed_ip in port_config.get('fixed_ips', []):
+ subnet_id = fixed_ip['subnet_id']
+
+ try:
+ subnet_config = client.show_subnet(
+ subnet_id, fields=['id', 'name', 'enable_dhcp',
+ 'dns_nameservers', 'host_routes',
+ 'ip_version', 'gateway_ip', 'cidr'])
+
+ LOG.debug('Received subnet %(subnet)s data: %(info)s',
+ {'subnet': subnet_id, 'info': subnet_config})
+
+ subnets_config[subnet_id] = subnet_config['subnet']
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get subnet info for %(subnet_id)s. Error: '
+ '%(err)s') % {'subnet_id': subnet_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ subnet_config = subnets_config[subnet_id]
+
+ subnet_network, netmask = _uncidr(
+ subnet_config['cidr'], subnet_config['ip_version'] == 6)
+
+ network = {
+ 'id': fixed_ip['subnet_id'],
+ 'network_id': port_config['network_id'],
+ 'type': 'ipv%s' % subnet_config['ip_version'],
+ 'link': port_id,
+ 'ip_address': fixed_ip['ip_address'],
+ 'netmask': netmask,
+            'routes': []
+ }
+
+        # TODO(etingof): we add a default route if a gateway is present.
+        # This is a hack; Neutron should have given us the route.
+
+ if subnet_config['gateway_ip']:
+ zero_addr = ('::0' if subnet_config['ip_version'] == 6
+ else '0.0.0.0')
+
+ route = {
+ 'network': zero_addr,
+ 'netmask': zero_addr,
+ 'gateway': subnet_config['gateway_ip']
+ }
+
+ network['routes'].append(route)
+
+ for host_config in subnet_config['host_routes']:
+ subnet_network, netmask = _uncidr(
+ host_config['destination'],
+ subnet_config['ip_version'] == 6)
+
+ route = {
+ 'network': subnet_network,
+ 'netmask': netmask,
+ 'gateway': host_config['nexthop']
+ }
+
+ network['routes'].append(route)
+
+ network_data['networks'].append(network)
+
+ return network_data
+
+
def get_node_portmap(task):
"""Extract the switch port information for the node.
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index 3f6ebb0f7..6cee17712 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -265,7 +265,10 @@ def create_pxe_config(task, pxe_options, template=None, ipxe_enabled=False):
"""
LOG.debug("Building PXE config for node %s", task.node.uuid)
if template is None:
- template = deploy_utils.get_pxe_config_template(task.node)
+ if ipxe_enabled:
+ template = deploy_utils.get_ipxe_config_template(task.node)
+ else:
+ template = deploy_utils.get_pxe_config_template(task.node)
_ensure_config_dirs_exist(task, ipxe_enabled)
@@ -384,7 +387,16 @@ def _dhcp_option_file_or_url(task, urlboot=False, ip_version=None):
to return options for DHCP. Possible options
are 4, and 6.
"""
- boot_file = deploy_utils.get_pxe_boot_file(task.node)
+ try:
+ if task.driver.boot.ipxe_enabled:
+ boot_file = deploy_utils.get_ipxe_boot_file(task.node)
+ else:
+ boot_file = deploy_utils.get_pxe_boot_file(task.node)
+ except AttributeError:
+ # Support boot interfaces that lack an explicit ipxe_enabled
+ # attribute flag.
+ boot_file = deploy_utils.get_pxe_boot_file(task.node)
+
# NOTE(TheJulia): There are additional cases as we add new
# features, so the logic below is in the form of if/elif/elif
if not urlboot:
@@ -800,7 +812,10 @@ def build_service_pxe_config(task, instance_image_info,
pxe_options = build_pxe_config_options(task, instance_image_info,
service=True,
ipxe_enabled=ipxe_enabled)
- pxe_config_template = deploy_utils.get_pxe_config_template(node)
+ if ipxe_enabled:
+ pxe_config_template = deploy_utils.get_ipxe_config_template(node)
+ else:
+ pxe_config_template = deploy_utils.get_pxe_config_template(node)
create_pxe_config(task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
@@ -942,8 +957,12 @@ def prepare_instance_pxe_config(task, image_info,
pxe_options = build_pxe_config_options(
task, image_info, service=ramdisk_boot,
ipxe_enabled=ipxe_enabled)
- pxe_config_template = (
- deploy_utils.get_pxe_config_template(node))
+ if ipxe_enabled:
+ pxe_config_template = (
+ deploy_utils.get_ipxe_config_template(node))
+ else:
+ pxe_config_template = (
+ deploy_utils.get_pxe_config_template(node))
create_pxe_config(
task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
diff --git a/ironic/common/utils.py b/ironic/common/utils.py
index 2d389af59..7cc0199bf 100644
--- a/ironic/common/utils.py
+++ b/ironic/common/utils.py
@@ -22,6 +22,7 @@ import contextlib
import datetime
import errno
import hashlib
+import ipaddress
import os
import re
import shutil
@@ -42,6 +43,15 @@ from ironic.conf import CONF
LOG = logging.getLogger(__name__)
+DATE_RE = r'(?P<year>-?\d{4,})-(?P<month>\d{2})-(?P<day>\d{2})'
+TIME_RE = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' + \
+ r'(\.(?P<sec_frac>\d+))?'
+TZ_RE = r'((?P<tz_sign>[+-])(?P<tz_hour>\d{2}):(?P<tz_min>\d{2}))' + \
+ r'|(?P<tz_z>Z)'
+
+DATETIME_RE = re.compile(
+ '%sT%s(%s)?' % (DATE_RE, TIME_RE, TZ_RE))
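+# Matches RFC 3339-style timestamps such as '2020-04-01T12:30:45.123+02:00'
+# or '2020-04-01T12:30:45Z'; the fractional seconds and the time zone
+# designator are both optional.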
+
warn_deprecated_extra_vif_port_id = False
@@ -567,6 +577,6 @@ def pop_node_nested_field(node, collection, field, default=None):
def wrap_ipv6(ip):
"""Wrap the address in square brackets if it's an IPv6 address."""
-    if netutils.is_valid_ipv6(ip):
-        return "[%s]" % ip
+    try:
+        if ipaddress.ip_address(ip).version == 6:
+            return "[%s]" % ip
+    except ValueError:
+        # Not an IP address at all (e.g. a host name) - leave it unchanged.
+        pass
     return ip
diff --git a/ironic/conductor/deployments.py b/ironic/conductor/deployments.py
index d0d3acc39..780b302c5 100644
--- a/ironic/conductor/deployments.py
+++ b/ironic/conductor/deployments.py
@@ -98,7 +98,7 @@ def start_deploy(task, manager, configdrive=None, event='deploy'):
task.driver.power.validate(task)
task.driver.deploy.validate(task)
utils.validate_instance_info_traits(task.node)
- conductor_steps.validate_deploy_templates(task)
+ conductor_steps.validate_deploy_templates(task, skip_missing=True)
except exception.InvalidParameterValue as e:
raise exception.InstanceDeployFailure(
_("Failed to validate deploy or power info for node "
@@ -268,6 +268,13 @@ def do_next_deploy_step(task, step_index, conductor_id):
_("Failed to deploy. Exception: %s") % e, traceback=True)
return
+ if task.node.provision_state == states.DEPLOYFAIL:
+ # NOTE(dtantsur): some deploy steps do not raise but rather update
+ # the node and return. Take them into account.
+ LOG.debug('Node %s is in error state, not processing '
+ 'the remaining deploy steps', task.node)
+ return
+
if ind == 0:
# We've done the very first deploy step.
# Update conductor_affinity to reference this conductor's ID
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index 8f226f089..222ad5507 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -209,6 +209,7 @@ class ConductorManager(base_manager.BaseConductorManager):
"updated unless it is in one of allowed "
"(%(allowed)s) states or in maintenance mode.")
updating_driver = 'driver' in delta
+ check_interfaces = updating_driver
for iface in drivers_base.ALL_INTERFACES:
interface_field = '%s_interface' % iface
if interface_field not in delta:
@@ -224,7 +225,10 @@ class ConductorManager(base_manager.BaseConductorManager):
'allowed': ', '.join(allowed_update_states),
'field': interface_field})
- driver_factory.check_and_update_node_interfaces(node_obj)
+ check_interfaces = True
+
+ if check_interfaces:
+ driver_factory.check_and_update_node_interfaces(node_obj)
# NOTE(dtantsur): if we're updating the driver from an invalid value,
# loading the old driver may be impossible. Since we only need to
@@ -1988,6 +1992,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# we would disallow it otherwise. That's done for recovering hopelessly
# broken nodes (e.g. with broken BMC).
with task_manager.acquire(context, node_id,
+ load_driver=False,
purpose='node deletion') as task:
node = task.node
if not node.maintenance and node.instance_uuid is not None:
@@ -2022,6 +2027,17 @@ class ConductorManager(base_manager.BaseConductorManager):
if node.console_enabled:
notify_utils.emit_console_notification(
task, 'console_set', fields.NotificationStatus.START)
+
+ try:
+ task.load_driver()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Could not load the driver for node %s '
+ 'to shut down its console', node.uuid)
+ notify_utils.emit_console_notification(
+ task, 'console_set',
+ fields.NotificationStatus.ERROR)
+
try:
task.driver.console.stop_console(task)
except Exception as err:
@@ -2058,7 +2074,7 @@ class ConductorManager(base_manager.BaseConductorManager):
with task_manager.acquire(context, port.node_id,
purpose='port deletion') as task:
vif, vif_use = utils.get_attached_vif(port)
- if vif:
+ if vif and not task.node.maintenance:
msg = _("Cannot delete the port %(port)s as it is bound "
"to VIF %(vif)s for %(use)s use.")
raise exception.InvalidState(
@@ -3064,6 +3080,7 @@ class ConductorManager(base_manager.BaseConductorManager):
return raid_iface.get_logical_disk_properties()
@METRICS.timer('ConductorManager.heartbeat')
+ @messaging.expected_exceptions(exception.InvalidParameterValue)
@messaging.expected_exceptions(exception.NoFreeConductorWorker)
def heartbeat(self, context, node_id, callback_url, agent_version=None,
agent_token=None):
@@ -3652,7 +3669,8 @@ def do_sync_power_state(task, count):
handle_sync_power_state_max_retries_exceeded(task, power_state)
return count
- if CONF.conductor.force_power_state_during_sync:
+ if (CONF.conductor.force_power_state_during_sync
+ and task.driver.power.supports_power_sync(task)):
LOG.warning("During sync_power_state, node %(node)s state "
"'%(actual)s' does not match expected state. "
"Changing hardware state to '%(state)s'.",
diff --git a/ironic/conductor/rpcapi.py b/ironic/conductor/rpcapi.py
index 64447b61a..8752914ac 100644
--- a/ironic/conductor/rpcapi.py
+++ b/ironic/conductor/rpcapi.py
@@ -907,6 +907,7 @@ class ConductorAPI(object):
:param topic: RPC topic. Defaults to self.topic.
:param agent_token: randomly generated validation token.
:param agent_version: the version of the agent that is heartbeating
+ :raises: InvalidParameterValue if an invalid agent token is received.
"""
new_kws = {}
version = '1.34'
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 5fb42f247..b4089d618 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -253,6 +253,10 @@ class TaskManager(object):
self.fsm.initialize(start_state=self.node.provision_state,
target_state=self.node.target_provision_state)
+ def load_driver(self):
+ if self.driver is None:
+ self.driver = driver_factory.build_driver_for_task(self)
+
def _lock(self):
self._debug_timer.restart()
@@ -332,6 +336,19 @@ class TaskManager(object):
self._on_error_args = args
self._on_error_kwargs = kwargs
+ def downgrade_lock(self):
+ """Downgrade the lock to a shared one."""
+ if self.node is None:
+ raise RuntimeError("Cannot downgrade an already released lock")
+
+ if not self.shared:
+ objects.Node.release(self.context, CONF.host, self.node.id)
+ self.shared = True
+ self.node.refresh()
+ LOG.debug("Successfully downgraded lock for %(purpose)s "
+ "on node %(node)s",
+ {'purpose': self._purpose, 'node': self.node.uuid})
+
def release_resources(self):
"""Unlock a node and release resources.
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index 4b994db86..c64dc9f5a 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -284,6 +284,13 @@ def node_power_action(task, new_state, timeout=None):
driver_internal_info = node.driver_internal_info
driver_internal_info['last_power_state_change'] = str(
timeutils.utcnow().isoformat())
+ # NOTE(dtantsur): wipe token on shutting down, otherwise a reboot in
+ # fast-track (or an accidentally booted agent) will cause subsequent
+ # actions to fail.
+ if target_state in (states.POWER_OFF, states.SOFT_POWER_OFF,
+ states.REBOOT, states.SOFT_REBOOT):
+ if not is_agent_token_pregenerated(node):
+ driver_internal_info.pop('agent_secret_token', False)
node.driver_internal_info = driver_internal_info
node.save()
@@ -956,6 +963,21 @@ def value_within_timeout(value, timeout):
return last_valid <= last
+def agent_is_alive(node, timeout=None):
+ """Check that the agent is likely alive.
+
+    Checks the time of the last agent heartbeat: if it occurred within the
+    timeout, the agent is presumed alive.
+
+    :param node: A node object.
+    :param timeout: Heartbeat timeout in seconds; defaults to
+        ``[deploy]fast_track_timeout``.
+    :returns: True if the agent is presumed alive, False otherwise.
+ """
+ return value_within_timeout(
+ node.driver_internal_info.get('agent_last_heartbeat'),
+ timeout or CONF.deploy.fast_track_timeout)
+
+
def is_fast_track(task):
"""Checks a fast track is available.
@@ -972,11 +994,23 @@ def is_fast_track(task):
:returns: True if the last heartbeat that was recorded was within
the [deploy]fast_track_timeout setting.
"""
- return (fast_track_able(task)
- and value_within_timeout(
- task.node.driver_internal_info.get('agent_last_heartbeat'),
- CONF.deploy.fast_track_timeout)
- and task.driver.power.get_power_state(task) == states.POWER_ON)
+ if (not fast_track_able(task)
+ or task.driver.power.get_power_state(task) != states.POWER_ON):
+ if task.node.last_error:
+ LOG.debug('Node %(node)s is not fast-track-able because it has '
+ 'an error: %(error)s',
+ {'node': task.node.uuid, 'error': task.node.last_error})
+ return False
+
+ if agent_is_alive(task.node):
+ return True
+ else:
+ LOG.debug('Node %(node)s should be fast-track-able, but the agent '
+ 'doesn\'t seem to be running. Last heartbeat: %(last)s',
+ {'node': task.node.uuid,
+ 'last': task.node.driver_internal_info.get(
+ 'agent_last_heartbeat')})
+ return False
def remove_agent_url(node):
@@ -1175,4 +1209,7 @@ def get_attached_vif(port):
rescue_vif = port.internal_info.get('rescuing_vif_port_id')
if rescue_vif:
return (rescue_vif, 'rescuing')
+ inspection_vif = port.internal_info.get('inspection_vif_port_id')
+ if inspection_vif:
+ return (inspection_vif, 'inspecting')
return (None, None)
diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py
index 8be4758b0..cae1b123f 100644
--- a/ironic/conf/deploy.py
+++ b/ironic/conf/deploy.py
@@ -147,6 +147,17 @@ opts = [
'Test" and typical ramdisk start-up. This value should '
'not exceed the [api]ramdisk_heartbeat_timeout '
'setting.')),
+ cfg.BoolOpt('erase_skip_read_only',
+ default=False,
+ mutable=True,
+                   help=_('Whether the ironic-python-agent should skip '
+                          'read-only devices when running the '
+                          '"erase_devices" clean step, where block devices '
+                          'are zeroed out. This requires '
+                          'ironic-python-agent 6.0.0 or greater. By default '
+                          'a read-only device causes non-metadata based '
+                          'cleaning operations to fail, due to the risk of '
+                          'data being retained between deployments of the '
+                          'bare metal node.')),
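+    # An illustrative ironic.conf snippet turning this behaviour on
+    # (assumes ironic-python-agent >= 6.0.0 on the ramdisk):
+    #
+    #   [deploy]
+    #   erase_skip_read_only = True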
]
diff --git a/ironic/conf/pxe.py b/ironic/conf/pxe.py
index 0e8ff5e37..2ddf13e76 100644
--- a/ironic/conf/pxe.py
+++ b/ironic/conf/pxe.py
@@ -54,14 +54,21 @@ opts = [
'$pybasedir', 'drivers/modules/pxe_config.template'),
mutable=True,
help=_('On ironic-conductor node, template file for PXE '
- 'configuration.')),
+ 'loader configuration.')),
+ cfg.StrOpt('ipxe_config_template',
+ default=os.path.join(
+ '$pybasedir', 'drivers/modules/ipxe_config.template'),
+ mutable=True,
+ help=_('On ironic-conductor node, template file for iPXE '
+                          'configuration.')),
cfg.StrOpt('uefi_pxe_config_template',
default=os.path.join(
'$pybasedir',
'drivers/modules/pxe_grub_config.template'),
mutable=True,
help=_('On ironic-conductor node, template file for PXE '
- 'configuration for UEFI boot loader.')),
+ 'configuration for UEFI boot loader. Generally this '
+ 'is used for GRUB specific templates.')),
cfg.DictOpt('pxe_config_template_by_arch',
default={},
mutable=True,
@@ -107,10 +114,22 @@ opts = [
cfg.StrOpt('uefi_pxe_bootfile_name',
default='bootx64.efi',
help=_('Bootfile DHCP parameter for UEFI boot mode.')),
+ cfg.StrOpt('ipxe_bootfile_name',
+ default='undionly.kpxe',
+               help=_('Bootfile DHCP parameter for iPXE.')),
+ cfg.StrOpt('uefi_ipxe_bootfile_name',
+ default='ipxe.efi',
+ help=_('Bootfile DHCP parameter for UEFI boot mode. If you '
+ 'experience problems with booting using it, try '
+ 'snponly.efi.')),
cfg.DictOpt('pxe_bootfile_name_by_arch',
default={},
help=_('Bootfile DHCP parameter per node architecture. '
'For example: aarch64:grubaa64.efi')),
+ cfg.DictOpt('ipxe_bootfile_name_by_arch',
+ default={},
+ help=_('Bootfile DHCP parameter per node architecture. '
+ 'For example: aarch64:ipxe_aa64.efi')),
cfg.StrOpt('ipxe_boot_script',
default=os.path.join(
'$pybasedir', 'drivers/modules/boot.ipxe'),
diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py
index 4b0210cb3..372858742 100644
--- a/ironic/dhcp/neutron.py
+++ b/ironic/dhcp/neutron.py
@@ -19,7 +19,7 @@ import time
+import ipaddress
 from neutronclient.common import exceptions as neutron_client_exc
from oslo_log import log as logging
-from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
@@ -187,12 +186,18 @@ class NeutronDHCPApi(base.BaseDHCP):
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
- if netutils.is_valid_ipv4(ip_address):
- return ip_address
- else:
- LOG.error("Neutron returned invalid IPv4 "
- "address %(ip_address)s on port %(port_uuid)s.",
- {'ip_address': ip_address, 'port_uuid': port_uuid})
+ try:
+ if ipaddress.ip_address(ip_address).version == 4:
+ return ip_address
+ else:
+ LOG.error("Neutron returned invalid IPv4 "
+ "address %(ip_address)s on port %(port_uuid)s.",
+ {'ip_address': ip_address,
+ 'port_uuid': port_uuid})
+ raise exception.InvalidIPv4Address(ip_address=ip_address)
+ except ValueError as exc:
+ LOG.error("An Invalid IP address was supplied and failed "
+ "basic validation: %s", exc)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error("No IP address assigned to Neutron port %s.",
diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py
index 3fbecba49..c33ad05b4 100644
--- a/ironic/drivers/base.py
+++ b/ironic/drivers/base.py
@@ -616,6 +616,18 @@ class PowerInterface(BaseInterface):
"""
return [states.POWER_ON, states.POWER_OFF, states.REBOOT]
+ def supports_power_sync(self, task):
+ """Check if power sync is supported for the given node.
+
+ If ``False``, the conductor will simply store whatever
+ ``get_power_state`` returns in the database instead of trying
+ to force the expected power state.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: boolean, whether power sync is supported.
+ """
+ return True
+
class ConsoleInterface(BaseInterface):
"""Interface for console-related actions."""
diff --git a/ironic/drivers/drac.py b/ironic/drivers/drac.py
index 430105eae..453f17876 100644
--- a/ironic/drivers/drac.py
+++ b/ironic/drivers/drac.py
@@ -25,7 +25,6 @@ from ironic.drivers.modules.drac import management
from ironic.drivers.modules.drac import power
from ironic.drivers.modules.drac import raid
from ironic.drivers.modules.drac import vendor_passthru
-from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
@@ -69,13 +68,14 @@ class IDRACHardware(generic.GenericHardware):
# if it is enabled by an operator (implying that the service is
# installed).
return [drac_inspect.DracWSManInspect, drac_inspect.DracInspect,
- drac_inspect.DracRedfishInspect, inspector.Inspector,
- noop.NoInspect]
+ drac_inspect.DracRedfishInspect] + super(
+ IDRACHardware, self).supported_inspect_interfaces
@property
def supported_raid_interfaces(self):
"""List of supported raid interfaces."""
- return [raid.DracWSManRAID, raid.DracRAID, noop.NoRAID]
+ return [raid.DracWSManRAID, raid.DracRAID] + super(
+ IDRACHardware, self).supported_raid_interfaces
@property
def supported_vendor_interfaces(self):
diff --git a/ironic/drivers/generic.py b/ironic/drivers/generic.py
index 1e7a83c4b..599e1139c 100644
--- a/ironic/drivers/generic.py
+++ b/ironic/drivers/generic.py
@@ -18,6 +18,7 @@ Generic hardware types.
from ironic.drivers import hardware_type
from ironic.drivers.modules import agent
+from ironic.drivers.modules import agent_power
from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
@@ -102,7 +103,7 @@ class ManualManagementHardware(GenericHardware):
@property
def supported_power_interfaces(self):
"""List of supported power interfaces."""
- return [fake.FakePower]
+ return [agent_power.AgentPower, fake.FakePower]
@property
def supported_vendor_interfaces(self):
diff --git a/ironic/drivers/ilo.py b/ironic/drivers/ilo.py
index 87fb19cba..4b824fffc 100644
--- a/ironic/drivers/ilo.py
+++ b/ironic/drivers/ilo.py
@@ -24,7 +24,6 @@ from ironic.drivers.modules.ilo import management
from ironic.drivers.modules.ilo import power
from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules.ilo import vendor
-from ironic.drivers.modules import inspector
from ironic.drivers.modules import noop
@@ -53,8 +52,8 @@ class IloHardware(generic.GenericHardware):
@property
def supported_inspect_interfaces(self):
"""List of supported inspect interfaces."""
- return [inspect.IloInspect, inspector.Inspector,
- noop.NoInspect]
+ return [inspect.IloInspect] + super(
+ IloHardware, self).supported_inspect_interfaces
@property
def supported_management_interfaces(self):
@@ -81,7 +80,8 @@ class Ilo5Hardware(IloHardware):
@property
def supported_raid_interfaces(self):
"""List of supported raid interfaces."""
- return [raid.Ilo5RAID, noop.NoRAID]
+ return [raid.Ilo5RAID] + super(
+ Ilo5Hardware, self).supported_raid_interfaces
@property
def supported_management_interfaces(self):
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index e667d8baa..9d5243533 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -169,34 +169,14 @@ def validate_http_provisioning_configuration(node):
class AgentDeployMixin(agent_base.AgentDeployMixin):
- @METRICS.timer('AgentDeployMixin.deploy_has_started')
- def deploy_has_started(self, task):
- commands = self._client.get_commands_status(task.node)
-
- for command in commands:
- if command['command_name'] == 'prepare_image':
- # deploy did start at some point
- return True
- return False
-
- @METRICS.timer('AgentDeployMixin.deploy_is_done')
- def deploy_is_done(self, task):
- commands = self._client.get_commands_status(task.node)
- if not commands:
- return False
+ has_decomposed_deploy_steps = True
- try:
- last_command = next(cmd for cmd in reversed(commands)
- if cmd['command_name'] == 'prepare_image')
- except StopIteration:
- return False
- else:
- return last_command['command_status'] != 'RUNNING'
-
- @METRICS.timer('AgentDeployMixin.continue_deploy')
+ @METRICS.timer('AgentDeployMixin.write_image')
+ @base.deploy_step(priority=80)
@task_manager.require_exclusive_lock
- def continue_deploy(self, task):
- task.process_event('resume')
+ def write_image(self, task):
+ if not task.driver.storage.should_write_image(task):
+ return
node = task.node
image_source = node.instance_info.get('image_source')
LOG.debug('Continuing deploy for node %(node)s with image %(img)s',
@@ -250,10 +230,33 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
if disk_label is not None:
image_info['disk_label'] = disk_label
- # Tell the client to download and write the image with the given args
- self._client.prepare_image(node, image_info)
-
- task.process_event('wait')
+ has_write_image = agent_base.find_step(
+ task, 'deploy', 'deploy', 'write_image') is not None
+ if not has_write_image:
+ LOG.warning('The agent on node %s does not have the deploy '
+ 'step deploy.write_image, using the deprecated '
+ 'synchronous fall-back', task.node.uuid)
+
+ if self.has_decomposed_deploy_steps and has_write_image:
+ configdrive = node.instance_info.get('configdrive')
+ # Now switch into the corresponding in-band deploy step and let the
+ # result be polled normally.
+ new_step = {'interface': 'deploy',
+ 'step': 'write_image',
+ 'args': {'image_info': image_info,
+ 'configdrive': configdrive}}
+ return agent_base.execute_step(task, new_step, 'deploy',
+ client=self._client)
+ else:
+ # TODO(dtantsur): remove in W
+ command = self._client.prepare_image(node, image_info, wait=True)
+ if command['command_status'] == 'FAILED':
+ # TODO(jimrollenhagen) power off if using neutron dhcp to
+ # align with pxe driver?
+ msg = (_('node %(node)s command status errored: %(error)s') %
+ {'node': node.uuid, 'error': command['command_error']})
+ LOG.error(msg)
+ deploy_utils.set_failed_state(task, msg)
# TODO(dtantsur): remove in W
def _get_uuid_from_result(self, task, type_uuid):
@@ -277,29 +280,18 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
return
return result
- @METRICS.timer('AgentDeployMixin.check_deploy_success')
- def check_deploy_success(self, node):
- # should only ever be called after we've validated that
- # the prepare_image command is complete
- command = self._client.get_commands_status(node)[-1]
- if command['command_status'] == 'FAILED':
- return command['command_error']
-
- @METRICS.timer('AgentDeployMixin.reboot_to_instance')
- def reboot_to_instance(self, task):
- task.process_event('resume')
+ @METRICS.timer('AgentDeployMixin.prepare_instance_boot')
+ @base.deploy_step(priority=60)
+ @task_manager.require_exclusive_lock
+ def prepare_instance_boot(self, task):
+ if not task.driver.storage.should_write_image(task):
+ task.driver.boot.prepare_instance(task)
+ # Move straight to the final steps
+ return
+
node = task.node
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
cpu_arch = task.node.properties.get('cpu_arch')
- error = self.check_deploy_success(node)
- if error is not None:
- # TODO(jimrollenhagen) power off if using neutron dhcp to
- # align with pxe driver?
- msg = (_('node %(node)s command status errored: %(error)s') %
- {'node': node.uuid, 'error': error})
- LOG.error(msg)
- deploy_utils.set_failed_state(task, msg)
- return
# If `boot_option` is set to `netboot`, PXEBoot.prepare_instance()
# would need root_uuid of the whole disk image to add it into the
@@ -375,9 +367,6 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
if CONF.agent.image_download_source == 'http':
deploy_utils.remove_http_instance_symlink(task.node.uuid)
- LOG.debug('Rebooting node %s to instance', node.uuid)
- self.reboot_and_finish_deploy(task)
-
class AgentDeploy(AgentDeployMixin, agent_base.AgentBaseMixin,
base.DeployInterface):
@@ -481,13 +470,10 @@ class AgentDeploy(AgentDeployMixin, agent_base.AgentBaseMixin,
:returns: status of the deploy. One of ironic.common.states.
"""
if manager_utils.is_fast_track(task):
+ # NOTE(mgoddard): For fast track we can skip this step and proceed
+ # immediately to the next deploy step.
LOG.debug('Performing a fast track deployment for %(node)s.',
{'node': task.node.uuid})
- # Update the database for the API and the task tracking resumes
- # the state machine state going from DEPLOYWAIT -> DEPLOYING
- task.process_event('wait')
- self.continue_deploy(task)
- return states.DEPLOYWAIT
elif task.driver.storage.should_write_image(task):
# Check if the driver has already performed a reboot in a previous
# deploy step.
@@ -498,19 +484,6 @@ class AgentDeploy(AgentDeployMixin, agent_base.AgentBaseMixin,
task.node.driver_internal_info = info
task.node.save()
return states.DEPLOYWAIT
- else:
- # TODO(TheJulia): At some point, we should de-dupe this code
- # as it is nearly identical to the iscsi deploy interface.
- # This is not being done now as it is expected to be
- # refactored in the near future.
- manager_utils.node_power_action(task, states.POWER_OFF)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- task.driver.boot.prepare_instance(task)
- manager_utils.node_power_action(task, states.POWER_ON)
- LOG.info('Deployment to node %s done', task.node.uuid)
- return None
@METRICS.timer('AgentDeploy.prepare')
@task_manager.require_exclusive_lock
@@ -649,6 +622,18 @@ class AgentRAID(base.RAIDInterface):
"""Return the properties of the interface."""
return {}
+ @METRICS.timer('AgentRAID.get_deploy_steps')
+ def get_deploy_steps(self, task):
+ """Get the list of deploy steps from the agent.
+
+ :param task: a TaskManager object containing the node
+ :raises InstanceDeployFailure: if the deploy steps are not yet
+ available (cached), for example, when a node has just been
+ enrolled and has not been deployed yet.
+ :returns: A list of deploy step dictionaries
+ """
+ return agent_base.get_steps(task, 'deploy', interface='raid')
+
@METRICS.timer('AgentRAID.create_configuration')
@base.clean_step(priority=0)
def create_configuration(self, task,
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index ffcab213a..49debc5a9 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -35,6 +35,7 @@ from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
+from ironic.drivers import base
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
@@ -335,6 +336,13 @@ def get_steps(task, step_type, interface=None, override_priorities=None):
return steps
+def find_step(task, step_type, interface, name):
+ """Find the given in-band step."""
+ steps = get_steps(task, step_type, interface)
+ return conductor_steps.find_step(
+ steps, {'interface': interface, 'step': name})
+
+
def _raise(step_type, msg):
assert step_type in ('clean', 'deploy')
exc = (exception.NodeCleaningFailure if step_type == 'clean'
@@ -342,18 +350,20 @@ def _raise(step_type, msg):
raise exc(msg)
-def execute_step(task, step, step_type):
+def execute_step(task, step, step_type, client=None):
"""Execute a clean or deploy step asynchronously on the agent.
:param task: a TaskManager object containing the node
:param step: a step dictionary to execute
:param step_type: 'clean' or 'deploy'
+ :param client: agent client (if available)
:raises: NodeCleaningFailure (clean step) or InstanceDeployFailure (deploy
step) if the agent does not return a command status.
:returns: states.CLEANWAIT/DEPLOYWAIT to signify the step will be
completed async
"""
- client = _get_client()
+ if client is None:
+ client = _get_client()
ports = objects.Port.list_by_node_id(
task.context, task.node.id)
call = getattr(client, 'execute_%s_step' % step_type)
@@ -361,8 +371,7 @@ def execute_step(task, step, step_type):
if not result.get('command_status'):
_raise(step_type, _(
'Agent on node %(node)s returned bad command result: '
- '%(result)s') % {'node': task.node.uuid,
- 'result': result.get('command_error')})
+ '%(result)s') % {'node': task.node.uuid, 'result': result})
return states.CLEANWAIT if step_type == 'clean' else states.DEPLOYWAIT
@@ -383,8 +392,21 @@ def _step_failure_handler(task, msg, step_type):
class HeartbeatMixin(object):
"""Mixin class implementing heartbeat processing."""
+ has_decomposed_deploy_steps = False
+ """Whether the driver supports decomposed deploy steps.
+
+ Previously (since Rocky), drivers used a single 'deploy' deploy step on
+ the deploy interface. Some additional steps were added for the 'direct'
+ and 'iscsi' deploy interfaces in the Ussuri cycle, which means that
+ more of the deployment flow is driven by deploy steps.
+ """
+
def __init__(self):
self._client = _get_client()
+ if not self.has_decomposed_deploy_steps:
+ LOG.warning('%s does not support decomposed deploy steps. This '
+ 'is deprecated and will stop working in a future '
+ 'release', self.__class__.__name__)
def continue_deploy(self, task):
"""Continues the deployment of baremetal node.
@@ -502,8 +524,12 @@ class HeartbeatMixin(object):
# are currently in the core deploy.deploy step. Other deploy steps
# may cause the agent to boot, but we should not trigger deployment
# at that point if the driver is polling for completion of a step.
- if self.in_core_deploy_step(task):
+ if (not self.has_decomposed_deploy_steps
+ and self.in_core_deploy_step(task)):
msg = _('Failed checking if deploy is done')
+ # NOTE(mgoddard): support backwards compatibility for
+ # drivers which do not implement continue_deploy and
+ # reboot_to_instance as deploy steps.
if not self.deploy_has_started(task):
msg = _('Node failed to deploy')
self.continue_deploy(task)
@@ -648,7 +674,7 @@ class HeartbeatMixin(object):
# handler.
fail_reason = (_('Agent returned bad result for command '
'finalize_rescue: %(result)s') %
- {'result': result.get('command_error')})
+ {'result': agent_client.get_command_error(result)})
raise exception.InstanceRescueFailure(node=node.uuid,
instance=node.instance_uuid,
reason=fail_reason)
@@ -741,7 +767,60 @@ class AgentBaseMixin(object):
task, manage_boot=self.should_manage_boot(task))
-class AgentDeployMixin(HeartbeatMixin):
+class AgentOobStepsMixin(object):
+ """Mixin with out-of-band deploy steps."""
+
+ @METRICS.timer('AgentDeployMixin.switch_to_tenant_network')
+ @base.deploy_step(priority=30)
+ @task_manager.require_exclusive_lock
+ def switch_to_tenant_network(self, task):
+ """Deploy step to switch the node to the tenant network.
+
+ :param task: a TaskManager object containing the node
+ """
+ try:
+ with manager_utils.power_state_for_network_configuration(task):
+ task.driver.network.remove_provisioning_network(task)
+ task.driver.network.configure_tenant_networks(task)
+ except Exception as e:
+ msg = (_('Error changing node %(node)s to tenant networks after '
+ 'deploy. %(cls)s: %(error)s') %
+ {'node': task.node.uuid, 'cls': e.__class__.__name__,
+ 'error': e})
+ # NOTE(mgoddard): Don't collect logs since the node has been
+ # powered off.
+ log_and_raise_deployment_error(task, msg, collect_logs=False,
+ exc=e)
+
+ @METRICS.timer('AgentDeployMixin.boot_instance')
+ @base.deploy_step(priority=20)
+ @task_manager.require_exclusive_lock
+ def boot_instance(self, task):
+ """Deploy step to boot the final instance.
+
+ :param task: a TaskManager object containing the node
+ """
+ can_power_on = (states.POWER_ON in
+ task.driver.power.get_supported_power_states(task))
+ try:
+ if can_power_on:
+ manager_utils.node_power_action(task, states.POWER_ON)
+ else:
+ LOG.debug('Not trying to power on node %s that does not '
+ 'support powering on, assuming already running',
+ task.node.uuid)
+ except Exception as e:
+ msg = (_('Error booting node %(node)s after deploy. '
+ '%(cls)s: %(error)s') %
+ {'node': task.node.uuid, 'cls': e.__class__.__name__,
+ 'error': e})
+ # NOTE(mgoddard): Don't collect logs since the node has been
+ # powered off.
+ log_and_raise_deployment_error(task, msg, collect_logs=False,
+ exc=e)
+
+
+class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
"""Mixin with deploy methods."""
@METRICS.timer('AgentDeployMixin.get_clean_steps')
@@ -1016,7 +1095,7 @@ class AgentDeployMixin(HeartbeatMixin):
msg = (_('Agent returned error for %(type)s step %(step)s on node '
'%(node)s : %(err)s.') %
{'node': node.uuid,
- 'err': command.get('command_error'),
+ 'err': agent_client.get_command_error(command),
'step': current_step,
'type': step_type})
LOG.error(msg)
@@ -1068,16 +1147,13 @@ class AgentDeployMixin(HeartbeatMixin):
LOG.error(msg)
return _step_failure_handler(task, msg, step_type)
- @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy')
- def reboot_and_finish_deploy(self, task):
- """Helper method to trigger reboot on the node and finish deploy.
-
- This method initiates a reboot on the node. On success, it
- marks the deploy as complete. On failure, it logs the error
- and marks deploy as failure.
+ @METRICS.timer('AgentDeployMixin.tear_down_agent')
+ @base.deploy_step(priority=40)
+ @task_manager.require_exclusive_lock
+ def tear_down_agent(self, task):
+ """A deploy step to tear down the agent.
:param task: a TaskManager object containing the node
- :raises: InstanceDeployFailure, if node reboot failed.
"""
wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.agent.post_deploy_get_power_state_retries + 1
@@ -1099,9 +1175,17 @@ class AgentDeployMixin(HeartbeatMixin):
# in-band methods
oob_power_off = strutils.bool_from_string(
node.driver_info.get('deploy_forces_oob_reboot', False))
+ can_power_on = (states.POWER_ON in
+ task.driver.power.get_supported_power_states(task))
try:
- if not oob_power_off:
+ if not can_power_on:
+ LOG.info('Power interface of node %(node)s does not support '
+ 'power on, using reboot to switch to the instance',
+ node.uuid)
+ self._client.sync(node)
+ manager_utils.node_power_action(task, states.REBOOT)
+ elif not oob_power_off:
try:
self._client.power_off(node)
except Exception as e:
@@ -1146,23 +1230,21 @@ class AgentDeployMixin(HeartbeatMixin):
'error': e})
log_and_raise_deployment_error(task, msg, exc=e)
- try:
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- manager_utils.node_power_action(task, states.POWER_ON)
- except Exception as e:
- msg = (_('Error rebooting node %(node)s after deploy. '
- '%(cls)s: %(error)s') %
- {'node': node.uuid, 'cls': e.__class__.__name__,
- 'error': e})
- # NOTE(mgoddard): Don't collect logs since the node has been
- # powered off.
- log_and_raise_deployment_error(task, msg, collect_logs=False,
- exc=e)
+ # TODO(dtantsur): remove in W
+ @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy')
+ def reboot_and_finish_deploy(self, task):
+ """Helper method to trigger reboot on the node and finish deploy.
+
+ This method initiates a reboot on the node. On success, it
+ marks the deploy as complete. On failure, it logs the error
+ and marks deploy as failure.
- # TODO(dtantsur): remove these two calls when this function becomes a
- # real deploy step.
+ :param task: a TaskManager object containing the node
+ :raises: InstanceDeployFailure, if node reboot failed.
+ """
+ # NOTE(dtantsur): do nothing here, the new deploy steps tear_down_agent
+ # and boot_instance will be picked up and finish the deploy (even for
+ # legacy deploy interfaces without decomposed steps).
task.process_event('wait')
manager_utils.notify_conductor_resume_deploy(task)
@@ -1283,7 +1365,7 @@ class AgentDeployMixin(HeartbeatMixin):
msg = (_("Failed to install a bootloader when "
"deploying node %(node)s. Error: %(error)s") %
{'node': node.uuid,
- 'error': result['command_error']})
+ 'error': agent_client.get_command_error(result)})
log_and_raise_deployment_error(task, msg)
else:
# Its possible the install will fail if the IPA image
@@ -1291,7 +1373,7 @@ class AgentDeployMixin(HeartbeatMixin):
LOG.info('Could not install bootloader for whole disk '
'image for node %(node)s, Error: %(error)s"',
{'node': node.uuid,
- 'error': result['command_error']})
+ 'error': agent_client.get_command_error(result)})
return
try:
diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py
index 59d1aa203..d1f3d6759 100644
--- a/ironic/drivers/modules/agent_client.py
+++ b/ironic/drivers/modules/agent_client.py
@@ -30,6 +30,26 @@ METRICS = metrics_utils.get_metrics_logger(__name__)
DEFAULT_IPA_PORTAL_PORT = 3260
+REBOOT_COMMAND = 'run_image'
+
+
+def get_command_error(command):
+ """Extract an error string from the command result.
+
+ :param command: Command information from the agent.
+    :returns: Error string.
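+
+    A minimal illustration with a made-up agent response::
+
+        >>> get_command_error({'command_error': {'message': 'boom'}})
+        'boom'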
+ """
+ error = command.get('command_error')
+ if error is None:
+ LOG.error('Agent returned invalid response: missing command_error in '
+ '%s', command)
+ return _('Invalid agent response')
+
+ if isinstance(error, dict):
+ return error.get('details') or error.get('message') or str(error)
+ else:
+ return error
+
class AgentClient(object):
"""Client for interacting with nodes via a REST API."""
@@ -42,9 +62,9 @@ class AgentClient(object):
"""Get URL endpoint for agent command request"""
agent_url = node.driver_internal_info.get('agent_url')
if not agent_url:
- raise exception.IronicException(_('Agent driver requires '
- 'agent_url in '
- 'driver_internal_info'))
+ raise exception.AgentConnectionFailed(_('Agent driver requires '
+ 'agent_url in '
+ 'driver_internal_info'))
return ('%(agent_url)s/%(api_version)s/commands/' %
{'agent_url': agent_url,
'api_version': CONF.agent.agent_api_version})
@@ -64,7 +84,7 @@ class AgentClient(object):
{'method': method, 'node': node.uuid, 'error': error})
raise exception.AgentAPIError(node=node.uuid,
status=error.get('code'),
- error=result.get('faultstring'))
+ error=get_command_error(result))
@METRICS.timer('AgentClient._wait_for_command')
@retrying.retry(
@@ -201,14 +221,14 @@ class AgentClient(object):
return result
@METRICS.timer('AgentClient.get_commands_status')
- @retrying.retry(
- retry_on_exception=(
- lambda e: isinstance(e, exception.AgentConnectionFailed)),
- stop_max_attempt_number=CONF.agent.max_command_attempts)
- def get_commands_status(self, node):
+ def get_commands_status(self, node, retry_connection=True,
+ expect_errors=False):
"""Get command status from agent.
:param node: A Node object.
+ :param retry_connection: Whether to retry connection problems.
+ :param expect_errors: If True, do not log connection problems as
+ errors.
:return: A list of command results, each result is related to a
command been issued to agent. A typical result can be:
@@ -237,17 +257,27 @@ class AgentClient(object):
"""
url = self._get_command_url(node)
LOG.debug('Fetching status of agent commands for node %s', node.uuid)
- try:
- resp = self.session.get(url, timeout=CONF.agent.command_timeout)
- except (requests.ConnectionError, requests.Timeout) as e:
- msg = (_('Failed to connect to the agent running on node %(node)s '
- 'to collect commands status. '
- 'Error: %(error)s') %
- {'node': node.uuid, 'error': e})
- LOG.error(msg)
- raise exception.AgentConnectionFailed(reason=msg)
- result = resp.json()['commands']
+ def _get():
+ try:
+ return self.session.get(url,
+ timeout=CONF.agent.command_timeout)
+ except (requests.ConnectionError, requests.Timeout) as e:
+ msg = (_('Failed to connect to the agent running on node '
+ '%(node)s to collect commands status. '
+ 'Error: %(error)s') %
+ {'node': node.uuid, 'error': e})
+ logging_call = LOG.debug if expect_errors else LOG.error
+ logging_call(msg)
+ raise exception.AgentConnectionFailed(reason=msg)
+
+ if retry_connection:
+ _get = retrying.retry(
+ retry_on_exception=(
+ lambda e: isinstance(e, exception.AgentConnectionFailed)),
+ stop_max_attempt_number=CONF.agent.max_command_attempts)(_get)
+
+ result = _get().json()['commands']
status = '; '.join('%(cmd)s: result "%(res)s", error "%(err)s"' %
{'cmd': r.get('command_name'),
'res': r.get('command_result'),
@@ -536,6 +566,21 @@ class AgentClient(object):
method='standby.power_off',
params={})
+ @METRICS.timer('AgentClient.reboot')
+ def reboot(self, node):
+ """Soft reboots the bare metal node by shutting down ramdisk OS.
+
+ :param node: A Node object.
+ :raises: IronicException when failed to issue the request or there was
+ a malformed response from the agent.
+ :raises: AgentAPIError when agent failed to execute specified command.
+ :returns: A dict containing command response from agent.
+ See :func:`get_commands_status` for a command result sample.
+ """
+ return self._command(node=node,
+ method='standby.%s' % REBOOT_COMMAND,
+ params={})
+
@METRICS.timer('AgentClient.sync')
def sync(self, node):
"""Flush file system buffers forcing changed blocks to disk.
diff --git a/ironic/drivers/modules/agent_power.py b/ironic/drivers/modules/agent_power.py
new file mode 100644
index 000000000..11ef5711a
--- /dev/null
+++ b/ironic/drivers/modules/agent_power.py
@@ -0,0 +1,220 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The agent power interface.
+"""
+
+import time
+
+from oslo_config import cfg
+from oslo_log import log
+import retrying
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import utils as cond_utils
+from ironic.drivers import base
+from ironic.drivers.modules import agent_client
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+_POWER_WAIT = 30
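+
+# An illustrative (not authoritative) ironic.conf snippet for enabling this
+# interface, assuming it is registered under the 'agent' entry point; the
+# constructor below additionally requires fast track to be enabled:
+#
+#   [DEFAULT]
+#   enabled_power_interfaces = agent,ipmitool
+#
+#   [deploy]
+#   fast_track = True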
+
+
+class AgentPower(base.PowerInterface):
+ """Power interface using the running agent for power actions."""
+
+ def __init__(self):
+ super(AgentPower, self).__init__()
+ if not CONF.deploy.fast_track:
+ raise exception.InvalidParameterValue(
+ _('[deploy]fast_track must be True to enable the agent '
+ 'power interface'))
+ self._client = agent_client.AgentClient()
+
+ def get_properties(self):
+ """Return the properties of the interface.
+
+ :returns: dictionary of <property name>:<property description> entries.
+ """
+ return {}
+
+ def validate(self, task):
+ """Validate the driver-specific Node deployment info.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :raises: InvalidParameterValue on malformed parameter(s)
+ """
+ # NOTE(dtantsur): the fast_track option is mutable, so we have to check
+ # it again on validation.
+ if not CONF.deploy.fast_track:
+ raise exception.InvalidParameterValue(
+ _('[deploy]fast_track must be True to enable the agent '
+ 'power interface'))
+ # TODO(dtantsur): support ACTIVE nodes
+ if not cond_utils.agent_is_alive(task.node):
+ raise exception.InvalidParameterValue(
+ _('Agent seems offline for node %s, the agent power interface '
+ 'cannot be used') % task.node.uuid)
+
+ def supports_power_sync(self, task):
+ """Check if power sync is supported for the given node.
+
+ Not supported for the agent power since it is not possible to power
+ on/off nodes.
+
+ :param task: A TaskManager instance containing the node to act on
+ with a **shared** lock.
+ :returns: boolean, whether power sync is supported.
+ """
+ return False
+
+ def get_supported_power_states(self, task):
+ """Get a list of the supported power states.
+
+        Only contains REBOOT and SOFT_REBOOT.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: A list with the supported power states defined
+ in :mod:`ironic.common.states`.
+ """
+ return [states.REBOOT, states.SOFT_REBOOT]
+
+ def get_power_state(self, task):
+ """Return the power state of the task's node.
+
+        Essentially, the only state this interface can detect is POWER ON;
+        anything else is reported as ``None``.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: A power state. One of :mod:`ironic.common.states`.
+ """
+ # TODO(dtantsur): support ACTIVE nodes
+ if cond_utils.agent_is_alive(task.node):
+ return states.POWER_ON
+ else:
+ LOG.error('Node %s is not fast-track-able, cannot determine '
+ 'its power state via the "agent" power interface',
+ task.node.uuid)
+ return None
+
+ def set_power_state(self, task, power_state, timeout=None):
+ """Set the power state of the task's node.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :param power_state: Power state from :mod:`ironic.common.states`.
+ Only REBOOT and SOFT_REBOOT are supported and are synonymous.
+ :param timeout: timeout (in seconds) positive integer (> 0) for any
+ power state. ``None`` indicates to use default timeout.
+ :raises: PowerStateFailure on non-supported power state.
+ """
+ if power_state in (states.REBOOT, states.SOFT_REBOOT):
+ return self.reboot(task)
+ else:
+ LOG.error('Power state %(state)s is not implemented for node '
+ '%(node)s using the "agent" power interface',
+ {'node': task.node.uuid, 'state': power_state})
+ raise exception.PowerStateFailure(pstate=power_state)
+
+ def reboot(self, task, timeout=None):
+ """Perform a reboot of the task's node.
+
+ Only soft reboot is implemented.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :param timeout: timeout (in seconds) positive integer (> 0) for any
+ power state. ``None`` indicates to use default timeout.
+ """
+ node = task.node
+
+ self._client.reboot(node)
+
+ info = node.driver_internal_info
+ # NOTE(dtantsur): wipe the agent token, otherwise the rebooted agent
+ # won't be able to heartbeat. This is mostly a precaution since the
+ # calling code in conductor is expected to handle it.
+ if not info.get('agent_secret_token_pregenerated'):
+ info.pop('agent_secret_token', None)
+ # NOTE(dtantsur): the URL may change on reboot, wipe it as well (but
+ # only after we call reboot).
+ info.pop('agent_url', None)
+ node.driver_internal_info = info
+ node.save()
+
+ LOG.debug('Requested reboot of node %(node)s via the agent, waiting '
+ '%(wait)d seconds for the node to power down',
+ {'node': task.node.uuid, 'wait': _POWER_WAIT})
+ time.sleep(_POWER_WAIT)
+
+ if (node.provision_state in (states.DEPLOYING, states.CLEANING)
+ and (node.driver_internal_info.get('deployment_reboot')
+ or node.driver_internal_info.get('cleaning_reboot'))):
+ # NOTE(dtantsur): we need to downgrade the lock otherwise
+ # heartbeats won't be processed. It should not have side effects
+ # for nodes in DEPLOYING/CLEANING.
+ task.downgrade_lock()
+
+ try:
+ self._wait_for_reboot(task, timeout)
+ finally:
+ # The caller probably expects a lock, so re-acquire it
+ task.upgrade_lock()
+
+ def _wait_for_reboot(self, task, timeout):
+ wait = CONF.agent.post_deploy_get_power_state_retry_interval
+ if not timeout:
+ timeout = CONF.agent.post_deploy_get_power_state_retries * wait
+
+ @retrying.retry(
+            # retrying measures delays in milliseconds; timeout is seconds
+            stop_max_delay=timeout * 1000,
+ retry_on_result=lambda result: not result,
+ retry_on_exception=(
+ lambda e: isinstance(e, exception.AgentConnectionFailed)),
+ wait_fixed=wait * 1000
+ )
+ def _wait_until_rebooted(task):
+ try:
+ status = self._client.get_commands_status(
+ task.node, retry_connection=False, expect_errors=True)
+ except exception.AgentConnectionFailed:
+ LOG.debug('Still waiting for the agent to come back on the '
+ 'node %s', task.node.uuid)
+ raise
+
+ if any(cmd['command_name'] == agent_client.REBOOT_COMMAND
+ for cmd in status):
+ LOG.debug('Still waiting for the agent to power off on the '
+ 'node %s', task.node.uuid)
+ return False
+
+ return True
+
+ try:
+ _wait_until_rebooted(task)
+ except exception.AgentConnectionFailed as exc:
+ msg = _('Agent failed to come back on %(node)s with the "agent" '
+ 'power interface: %(exc)s') % {
+ 'node': task.node.uuid, 'exc': exc}
+ LOG.error(msg)
+ raise exception.PowerStateFailure(msg)
+ except Exception as exc:
+ LOG.error('Could not reboot node %(node)s with the "agent" power '
+ 'interface: %(exc)s',
+ {'node': task.node.uuid, 'exc': exc})
+ raise exception.PowerStateFailure(
+ _('Unexpected error when rebooting through the agent: %s')
+ % exc)
diff --git a/ironic/drivers/modules/ansible/deploy.py b/ironic/drivers/modules/ansible/deploy.py
index cbecdc976..d4186741f 100644
--- a/ironic/drivers/modules/ansible/deploy.py
+++ b/ironic/drivers/modules/ansible/deploy.py
@@ -375,9 +375,13 @@ def _get_clean_steps(node, interface=None, override_priorities=None):
return steps
-class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
+class AnsibleDeploy(agent_base.HeartbeatMixin,
+ agent_base.AgentOobStepsMixin,
+ base.DeployInterface):
"""Interface for deploy-related actions."""
+ has_decomposed_deploy_steps = True
+
def __init__(self):
super(AnsibleDeploy, self).__init__()
# NOTE(pas-ha) overriding agent creation as we won't be
@@ -442,12 +446,22 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
+ def process_next_step(self, task, step_type):
+ """Start the next clean/deploy step if the previous one is complete.
+
+ :param task: a TaskManager instance
+ :param step_type: "clean" or "deploy"
+ """
+ # Run the next step as soon as agent heartbeats in deploy.deploy
+ if step_type == 'deploy' and self.in_core_deploy_step(task):
+ manager_utils.notify_conductor_resume_deploy(task)
+
@staticmethod
def _required_image_info(task):
"""Gather and save needed image info while the context is good.
Gather image info that will be needed later, during the
- continue_deploy execution, where the context won't be the same
+ write_image execution, where the context won't be the same
        anymore, as it will come from the server's heartbeat.
"""
node = task.node
@@ -586,35 +600,30 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
- @METRICS.timer('AnsibleDeploy.continue_deploy')
- def continue_deploy(self, task):
+ @METRICS.timer('AnsibleDeploy.write_image')
+ @base.deploy_step(priority=80)
+ def write_image(self, task):
# NOTE(pas-ha) the lock should be already upgraded in heartbeat,
# just setting its purpose for better logging
task.upgrade_lock(purpose='deploy')
- task.process_event('resume')
# NOTE(pas-ha) this method is called from heartbeat processing only,
# so we are sure we need this particular method, not the general one
node_address = _get_node_ip(task)
self._ansible_deploy(task, node_address)
- self.reboot_to_instance(task)
-
- @METRICS.timer('AnsibleDeploy.reboot_to_instance')
- def reboot_to_instance(self, task):
- node = task.node
- LOG.info('Ansible complete deploy on node %s', node.uuid)
-
- LOG.debug('Rebooting node %s to instance', node.uuid)
+ LOG.info('Ansible complete deploy on node %s', task.node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
- self.reboot_and_finish_deploy(task)
- task.driver.boot.clean_up_ramdisk(task)
- # TODO(dtantsur): remove these two calls when this function becomes a
- # real deploy step.
- task.process_event('wait')
- manager_utils.notify_conductor_resume_deploy(task)
+ @METRICS.timer('AnsibleDeploy.tear_down_agent')
+ @base.deploy_step(priority=40)
+ @task_manager.require_exclusive_lock
+ def tear_down_agent(self, task):
+ """A deploy step to tear down the agent.
+
+        Shuts down the machine; moving it off the provisioning network is
+        handled by the separate switch_to_tenant_network deploy step.
+
- @METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
- def reboot_and_finish_deploy(self, task):
+ :param task: a TaskManager object containing the node
+ """
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@@ -652,13 +661,6 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.node_power_action(task, states.POWER_OFF)
else:
manager_utils.node_power_action(task, states.POWER_OFF)
- power_state_to_restore = (
- manager_utils.power_on_node_if_needed(task))
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- manager_utils.restore_power_state_if_needed(
- task, power_state_to_restore)
- manager_utils.node_power_action(task, states.POWER_ON)
except Exception as e:
msg = (_('Error rebooting node %(node)s after deploy. '
'Error: %(error)s') %
diff --git a/ironic/drivers/modules/console_utils.py b/ironic/drivers/modules/console_utils.py
index d137bbf40..b2f92ba3d 100644
--- a/ironic/drivers/modules/console_utils.py
+++ b/ironic/drivers/modules/console_utils.py
@@ -21,6 +21,7 @@ Ironic console utilities.
import errno
import fcntl
+import ipaddress
import os
import signal
import socket
@@ -32,7 +33,6 @@ from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import fileutils
-from oslo_utils import netutils
import psutil
from ironic.common import exception
@@ -402,7 +402,7 @@ def start_socat_console(node_uuid, port, console_cmd):
args.append('-L%s' % pid_file)
console_host = CONF.console.socat_address
- if netutils.is_valid_ipv6(console_host):
+ if ipaddress.ip_address(console_host).version == 6:
arg = ('TCP6-LISTEN:%(port)s,bind=[%(host)s],reuseaddr,fork,'
'max-children=1')
else:
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index a255700d9..6360f0257 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -311,6 +311,7 @@ def agent_add_clean_params(task):
secure_erase = CONF.deploy.enable_ata_secure_erase
info['agent_enable_ata_secure_erase'] = secure_erase
info['disk_erasure_concurrency'] = CONF.deploy.disk_erasure_concurrency
+ info['agent_erase_skip_read_only'] = CONF.deploy.erase_skip_read_only
task.node.driver_internal_info = info
task.node.save()
@@ -378,6 +379,54 @@ def get_pxe_boot_file(node):
return boot_file
+def get_ipxe_boot_file(node):
+ """Return the iPXE boot file name requested for deploy.
+
+ This method returns the iPXE boot file name to be used for deploy.
+ An architecture-specific boot file is searched for first; the
+ BIOS/UEFI boot file is used if no valid architecture-specific
+ file is found.
+
+ If no valid value is found, the lookup reverts to the
+ ``get_pxe_boot_file`` method and thus the ``[pxe]pxe_bootfile_name``
+ and ``[pxe]uefi_pxe_bootfile_name`` settings.
+
+ :param node: A single Node.
+ :returns: The iPXE boot file name.
+ """
+ cpu_arch = node.properties.get('cpu_arch')
+ boot_file = CONF.pxe.ipxe_bootfile_name_by_arch.get(cpu_arch)
+ if boot_file is None:
+ if boot_mode_utils.get_boot_mode(node) == 'uefi':
+ boot_file = CONF.pxe.uefi_ipxe_bootfile_name
+ else:
+ boot_file = CONF.pxe.ipxe_bootfile_name
+
+ if boot_file is None:
+ boot_file = get_pxe_boot_file(node)
+
+ return boot_file
+
+
+def get_ipxe_config_template(node):
+ """Return the iPXE config template file name requested of deploy.
+
+ This method returns the iPXE configuration template file.
+
+ :param node: A single Node.
+ :returns: The iPXE config template file name.
+ """
+ # NOTE(TheJulia): iPXE configuration files don't change based upon the
+ # architecture and we're not trying to support multiple different boot
+ # loaders by architecture as they are all consistent, whereas PXE
+ # could need to be grub for one arch and PXELINUX for another.
+ configured_template = CONF.pxe.ipxe_config_template
+ override_template = node.driver_info.get('pxe_template')
+ if override_template:
+ configured_template = override_template
+ return configured_template or get_pxe_config_template(node)
+
+
def get_pxe_config_template(node):
"""Return the PXE config template file name requested for deploy.
@@ -463,6 +512,11 @@ def validate_image_properties(ctx, deploy_info, properties):
the mentioned properties.
"""
image_href = deploy_info['image_source']
+ boot_iso = deploy_info.get('boot_iso')
+ if image_href and boot_iso:
+ raise exception.InvalidParameterValue(_(
+ "An 'image_source' and 'boot_iso' parameter may not be "
+ "specified at the same time."))
try:
img_service = image_service.get_image_service(image_href, context=ctx)
image_props = img_service.show(image_href)['properties']
@@ -649,11 +703,21 @@ def get_image_instance_info(node):
instance_info. Also raises same exception if kernel/ramdisk is
missing in instance_info for non-glance images.
"""
+ # TODO(TheJulia): We seem to have a lack of direct unit testing of this
+ # method, but that is likely okay. If memory serves we test this at
+ # a few different levels. That being said, it would be good for some
+ # more explicit unit testing to exist.
info = {}
- info['image_source'] = node.instance_info.get('image_source')
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
- if not is_whole_disk_image:
+ boot_iso = node.instance_info.get('boot_iso')
+
+ if not boot_iso:
+ info['image_source'] = node.instance_info.get('image_source')
+ else:
+ info['boot_iso'] = boot_iso
+
+ if not is_whole_disk_image and not boot_iso:
if not service_utils.is_glance_image(info['image_source']):
info['kernel'] = node.instance_info.get('kernel')
info['ramdisk'] = node.instance_info.get('ramdisk')
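get_image_instance_info now prefers a boot_iso over image_source, and only requires a kernel/ramdisk pair when deploying a partition image without an ISO. A condensed sketch of that precedence (omitting the Glance check for brevity):

def image_info(instance_info, is_whole_disk_image=False):
    # 'boot_iso' takes precedence over 'image_source'.
    info = {}
    boot_iso = instance_info.get('boot_iso')
    if boot_iso:
        info['boot_iso'] = boot_iso
    else:
        info['image_source'] = instance_info.get('image_source')
    # kernel/ramdisk only matter for partition images without an ISO.
    if not is_whole_disk_image and not boot_iso:
        info['kernel'] = instance_info.get('kernel')
        info['ramdisk'] = instance_info.get('ramdisk')
    return info

assert 'image_source' not in image_info({'boot_iso': 'http://x/boot.iso'})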
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index facc0aeba..cd831af6c 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -371,8 +371,7 @@ def list_raid_settings(node):
client = drac_common.get_drac_client(node)
return client.list_raid_settings()
except drac_exceptions.BaseClientException as exc:
- LOG.error('DRAC driver failed to list raid settings'
- 'on %(raid_controller_fqdd)s '
+ LOG.error('DRAC driver failed to list raid settings '
'for node %(node_uuid)s. '
'Reason: %(error)s.',
{'node_uuid': node.uuid,
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index 62c6bbb23..ef330c5c2 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -116,6 +116,15 @@ POST_INPOSTDISCOVERY_STATE = "InPostDiscoveryComplete"
POST_FINISHEDPOST_STATE = "FinishedPost"
""" Node is in FinishedPost post state."""
+SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY = 'legacy bios only'
+""" Node supports only legacy BIOS boot mode."""
+
+SUPPORTED_BOOT_MODE_UEFI_ONLY = 'uefi only'
+""" Node supports only UEFI boot mode."""
+
+SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI = 'legacy bios and uefi'
+""" Node supports both legacy BIOS and UEFI boot mode."""
+
def copy_image_to_web_server(source_file_path, destination):
"""Copies the given image to the http web server.
@@ -492,6 +501,24 @@ def set_boot_mode(node, boot_mode):
{'uuid': node.uuid, 'boot_mode': boot_mode})
+def get_current_boot_mode(node):
+ """Get the current boot mode for a node.
+
+ :param node: an ironic node object.
+ :raises: IloOperationError if fetching the boot mode failed.
+ :raises: IloOperationNotSupported if the node does not support
+ getting the current boot mode.
+ :returns: The current boot mode, one of
+ :mod:`ironic.common.boot_modes`.
+ """
+ ilo_object = get_ilo_object(node)
+ operation = _("Get current boot mode")
+ try:
+ c_boot_mode = ilo_object.get_current_boot_mode()
+ return BOOT_MODE_ILO_TO_GENERIC[c_boot_mode.lower()]
+ except ilo_error.IloError as ilo_exception:
+ raise exception.IloOperationError(operation=operation,
+ error=ilo_exception)
+
+
def update_boot_mode(task):
"""Update instance_info with boot mode to be used for deploy.
diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py
index 07cbe7b41..45bac59a1 100644
--- a/ironic/drivers/modules/ilo/management.py
+++ b/ironic/drivers/modules/ilo/management.py
@@ -24,6 +24,7 @@ from oslo_utils import excutils
from oslo_utils import importutils
from ironic.common import boot_devices
+from ironic.common import boot_modes
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
@@ -684,6 +685,59 @@ class IloManagement(base.ManagementInterface):
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
+ def get_supported_boot_modes(self, task):
+ """Get a list of the supported boot devices.
+
+ :param task: a task from TaskManager.
+ :raises: IloOperationError if any exception happens in proliantutils
+ :returns: A list with the supported boot modes defined
+ in :mod:`ironic.common.boot_modes`.
+ """
+ node = task.node
+ ilo_object = ilo_common.get_ilo_object(node)
+ try:
+ modes = ilo_object.get_supported_boot_mode()
+ if modes == ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY:
+ return [boot_modes.LEGACY_BIOS]
+ elif modes == ilo_common.SUPPORTED_BOOT_MODE_UEFI_ONLY:
+ return [boot_modes.UEFI]
+ elif modes == ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI:
+ return [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ except ilo_error.IloError as ilo_exception:
+ operation = _("Get supported boot modes")
+ raise exception.IloOperationError(operation=operation,
+ error=ilo_exception)
+
+ @task_manager.require_exclusive_lock
+ def set_boot_mode(self, task, mode):
+ """Set the boot mode for a node.
+
+ Set the boot mode to use on next reboot of the node.
+
+ :param task: A task from TaskManager.
+ :param mode: The boot mode, one of
+ :mod:`ironic.common.boot_modes`.
+ :raises: InvalidParameterValue if an invalid boot mode is
+ specified.
+ :raises: IloOperationError if setting boot mode failed.
+ """
+ if mode not in self.get_supported_boot_modes(task):
+ raise exception.InvalidParameterValue(_(
+ "The given boot mode '%s' is not supported.") % mode)
+ ilo_common.set_boot_mode(task.node, mode)
+
+ def get_boot_mode(self, task):
+ """Get the current boot mode for a node.
+
+ Provides the current boot mode of the node.
+
+ :param task: A task from TaskManager.
+ :raises: IloOperationError on an error from the IloClient library.
+ :returns: The boot mode, one of :mod:`ironic.common.boot_modes`, or
+ None if it is unknown.
+ """
+ return ilo_common.get_current_boot_mode(task.node)
+
class Ilo5Management(IloManagement):
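set_boot_mode validates against whatever get_supported_boot_modes reports before touching the hardware, so unsupported modes fail fast with InvalidParameterValue. The same validate-then-delegate shape, reduced to a self-contained sketch with a hypothetical hardware client:

class BootModeManager:
    """Illustrative stand-in for IloManagement's boot-mode methods."""

    def __init__(self, hardware, supported=('uefi', 'bios')):
        self.hardware = hardware          # hypothetical client object
        self.supported = list(supported)

    def set_boot_mode(self, mode):
        # Fail fast before touching the hardware.
        if mode not in self.supported:
            raise ValueError("The given boot mode '%s' is not "
                             "supported." % mode)
        self.hardware.set_boot_mode(mode)

class FakeHardware:
    def set_boot_mode(self, mode):
        self.mode = mode

hw = FakeHardware()
BootModeManager(hw).set_boot_mode('uefi')
assert hw.mode == 'uefi'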
diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py
index 4dfa6ce59..85beca183 100644
--- a/ironic/drivers/modules/ipmitool.py
+++ b/ironic/drivers/modules/ipmitool.py
@@ -30,13 +30,13 @@ DRIVER.
"""
import contextlib
-import functools
import os
import re
import subprocess
import tempfile
import time
+from eventlet.green import subprocess as green_subprocess
from ironic_lib import metrics_utils
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
@@ -389,31 +389,6 @@ def _parse_driver_info(node):
}
-def _exec_ipmitool_wait(timeout, driver_info, popen_obj):
- wait_interval = min(timeout, 0.5)
-
- while timeout >= 0:
- if not popen_obj.poll():
- return
-
- time.sleep(wait_interval)
- timeout -= wait_interval
-
- LOG.warning('Killing timed out IPMI process "%(cmd)s" for node %(node)s.',
- {'node': driver_info['uuid'], 'cmd': popen_obj.cmd})
-
- popen_obj.terminate()
- time.sleep(0.5)
- if popen_obj.poll():
- popen_obj.kill()
-
- time.sleep(1)
-
- if popen_obj.poll():
- LOG.warning('Could not kill IPMI process "%(cmd)s" for node %(node)s.',
- {'node': driver_info['uuid'], 'cmd': popen_obj.cmd})
-
-
def _get_ipmitool_args(driver_info, pw_file=None):
ipmi_version = ('lanplus'
if driver_info['protocol_version'] == '2.0'
@@ -486,22 +461,12 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None,
args.append('1')
args.append('-N')
- if CONF.ipmi.use_ipmitool_retries:
- args.append(str(CONF.ipmi.min_command_interval))
- else:
- args.append('1')
+ args.append(str(CONF.ipmi.min_command_interval))
extra_args = {}
if kill_on_timeout:
- # NOTE(etingof): We can't trust ipmitool to terminate in time.
- # Therefore we have to kill it if it is running for longer than
- # we asked it to.
- # For that purpose we inject the time-capped `popen.wait` call
- # before the uncapped `popen.communicate` is called internally.
- # That gives us a chance to kill misbehaving `ipmitool` child.
- extra_args['on_execute'] = functools.partial(
- _exec_ipmitool_wait, timeout, driver_info)
+ extra_args['timeout'] = timeout
if check_exit_code is not None:
extra_args['check_exit_code'] = check_exit_code
@@ -591,7 +556,10 @@ def _set_and_wait(task, power_action, driver_info, timeout=None):
try:
_exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
- processutils.ProcessExecutionError) as e:
+ processutils.ProcessExecutionError,
+ subprocess.TimeoutExpired,
+ # https://github.com/eventlet/eventlet/issues/624
+ green_subprocess.TimeoutExpired) as e:
LOG.warning("IPMI power action %(cmd)s failed for node %(node_id)s "
"with error: %(error)s.",
{'node_id': driver_info['uuid'], 'cmd': cmd, 'error': e})
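Rather than baby-sitting the ipmitool child with a hand-rolled poll/terminate/kill loop, the timeout is now delegated via the `timeout` keyword, and both the stdlib and eventlet-green TimeoutExpired must be caught because eventlet raises its own class (see the linked issue). A reduced sketch of the dual except, assuming eventlet is installed:

import subprocess

from eventlet.green import subprocess as green_subprocess

def run_ipmi_command(cmd, timeout):
    # Under eventlet monkey-patching, the green TimeoutExpired is
    # raised instead of the stdlib one, so both are handled
    # (https://github.com/eventlet/eventlet/issues/624).
    try:
        return subprocess.run(cmd, timeout=timeout, check=True)
    except (subprocess.TimeoutExpired,
            green_subprocess.TimeoutExpired) as exc:
        raise RuntimeError('IPMI command timed out: %s' % exc)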
diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py
index 8069de0b5..41c83be61 100644
--- a/ironic/drivers/modules/iscsi_deploy.py
+++ b/ironic/drivers/modules/iscsi_deploy.py
@@ -602,6 +602,8 @@ class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
base.DeployInterface):
"""iSCSI Deploy Interface for deploy-related actions."""
+ has_decomposed_deploy_steps = True
+
def get_properties(self):
return agent_base.VENDOR_PROPERTIES
@@ -647,14 +649,12 @@ class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
"""
node = task.node
if manager_utils.is_fast_track(task):
+ # NOTE(mgoddard): For fast track we can mostly skip this step and
+ # proceed to the next step (i.e. write_image).
LOG.debug('Performing a fast track deployment for %(node)s.',
{'node': task.node.uuid})
deploy_utils.cache_instance_image(task.context, node)
check_image_size(task)
- # Update the database for the API and the task tracking resumes
- # the state machine state going from DEPLOYWAIT -> DEPLOYING
- task.process_event('wait')
- self.continue_deploy(task)
elif task.driver.storage.should_write_image(task):
# Standard deploy process
deploy_utils.cache_instance_image(task.context, node)
@@ -666,29 +666,16 @@ class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
manager_utils.node_power_action(task, states.REBOOT)
info = task.node.driver_internal_info
info.pop('deployment_reboot', None)
+ info.pop('deployment_uuids', None)
task.node.driver_internal_info = info
task.node.save()
return states.DEPLOYWAIT
- else:
- # Boot to an Storage Volume
-
- # TODO(TheJulia): At some point, we should de-dupe this code
- # as it is nearly identical to the agent deploy interface.
- # This is not being done now as it is expected to be
- # refactored in the near future.
- manager_utils.node_power_action(task, states.POWER_OFF)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- task.driver.boot.prepare_instance(task)
- manager_utils.node_power_action(task, states.POWER_ON)
-
- return None
- @METRICS.timer('AgentDeployMixin.continue_deploy')
+ @METRICS.timer('ISCSIDeploy.write_image')
+ @base.deploy_step(priority=90)
@task_manager.require_exclusive_lock
- def continue_deploy(self, task):
+ def write_image(self, task):
"""Method invoked when deployed using iSCSI.
This method is invoked during a heartbeat from an agent when
@@ -701,18 +688,39 @@ class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
:raises: InstanceDeployFailure, if it encounters some error during
the deploy.
"""
- task.process_event('resume')
+ if not task.driver.storage.should_write_image(task):
+ LOG.debug('Skipping write_image for node %s', task.node.uuid)
+ return
+
node = task.node
LOG.debug('Continuing the deployment on node %s', node.uuid)
uuid_dict_returned = do_agent_iscsi_deploy(task, self._client)
+ utils.set_node_nested_field(node, 'driver_internal_info',
+ 'deployment_uuids', uuid_dict_returned)
+ node.save()
+
+ @METRICS.timer('ISCSIDeploy.prepare_instance_boot')
+ @base.deploy_step(priority=80)
+ def prepare_instance_boot(self, task):
+ if not task.driver.storage.should_write_image(task):
+ task.driver.boot.prepare_instance(task)
+ return
+
+ node = task.node
+ try:
+ uuid_dict_returned = node.driver_internal_info['deployment_uuids']
+ except KeyError:
+ raise exception.InstanceDeployFailure(
+ _('Invalid internal state: the write_image deploy step has '
+ 'not been called before prepare_instance_boot'))
root_uuid = uuid_dict_returned.get('root uuid')
efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid')
prep_boot_part_uuid = uuid_dict_returned.get(
'PrEP Boot partition uuid')
+
self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid,
prep_boot_part_uuid=prep_boot_part_uuid)
- self.reboot_and_finish_deploy(task)
@METRICS.timer('ISCSIDeploy.prepare')
@task_manager.require_exclusive_lock
@@ -788,3 +796,6 @@ class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
"""
deploy_utils.destroy_images(task.node.uuid)
super(ISCSIDeploy, self).clean_up(task)
+ if utils.pop_node_nested_field(task.node, 'driver_internal_info',
+ 'deployment_uuids'):
+ task.node.save()
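The iSCSI interface is likewise decomposed: write_image (priority 90) stores the partition UUIDs it discovers in driver_internal_info['deployment_uuids'], and prepare_instance_boot (priority 80) reads them back, treating a missing key as an out-of-order invocation. A sketch of that handoff:

# Sketch of the step-to-step handoff through driver_internal_info.
driver_internal_info = {}

def write_image():
    # Stands in for do_agent_iscsi_deploy() storing its results.
    driver_internal_info['deployment_uuids'] = {'root uuid': 'abc-123'}

def prepare_instance_boot():
    try:
        uuids = driver_internal_info['deployment_uuids']
    except KeyError:
        raise RuntimeError('write_image has not been called before '
                           'prepare_instance_boot')
    return uuids.get('root uuid')

write_image()
assert prepare_instance_boot() == 'abc-123'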
diff --git a/ironic/drivers/modules/network/common.py b/ironic/drivers/modules/network/common.py
index ec98cac65..2c3c4be0c 100644
--- a/ironic/drivers/modules/network/common.py
+++ b/ironic/drivers/modules/network/common.py
@@ -410,7 +410,7 @@ class VIFPortIDMixin(object):
or self._get_vif_id_by_port_like_obj(p_obj) or None)
def get_node_network_data(self, task):
- """Return network configuration for node NICs.
+ """Get network configuration data for node's ports/portgroups.
Gather L2 and L3 network settings from ironic node `network_data`
field. Ironic would eventually pass network configuration to the node
@@ -633,3 +633,51 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
# DELETING state.
if task.node.provision_state in [states.ACTIVE, states.DELETING]:
neutron.unbind_neutron_port(vif_id, context=task.context)
+
+ def get_node_network_data(self, task):
+ """Get network configuration data for node ports.
+
+ Pull network data from ironic node object if present, otherwise
+ collect it for Neutron VIFs.
+
+ :param task: A TaskManager instance.
+ :raises: InvalidParameterValue, if the network interface configuration
+ is invalid.
+ :raises: MissingParameterValue, if some parameters are missing.
+ :returns: a dict holding network configuration information adhering
+ to the Nova network metadata layout (`network_data.json`).
+ """
+ # NOTE(etingof): static network data takes precedence
+ network_data = (
+ super(NeutronVIFPortIDMixin, self).get_node_network_data(task))
+ if network_data:
+ return network_data
+
+ node = task.node
+
+ LOG.debug('Gathering network data from ports of node '
+ '%(node)s', {'node': node.uuid})
+
+ network_data = collections.defaultdict(list)
+
+ for port_obj in task.ports:
+ vif_port_id = self.get_current_vif(task, port_obj)
+
+ LOG.debug('Considering node %(node)s port %(port)s, VIF %(vif)s',
+ {'node': node.uuid, 'port': port_obj.uuid,
+ 'vif': vif_port_id})
+
+ if not vif_port_id:
+ continue
+
+ port_network_data = neutron.get_neutron_port_data(
+ port_obj.uuid, vif_port_id, context=task.context)
+
+ for field, field_data in port_network_data.items():
+ if field_data:
+ network_data[field].extend(field_data)
+
+ LOG.debug('Collected network data for node %(node)s: %(data)s',
+ {'node': node.uuid, 'data': network_data})
+
+ return network_data
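For Neutron-managed ports, per-port network data is merged field by field into a single structure; list-typed fields ('links', 'networks', 'services' in the network_data.json layout) are simply concatenated across ports. A reduced sketch of the merge:

import collections

# Two hypothetical per-port payloads in the network_data.json layout.
ports = [
    {'links': [{'id': 'eth0'}], 'networks': [{'id': 'net0'}]},
    {'links': [{'id': 'eth1'}], 'networks': []},
]

network_data = collections.defaultdict(list)
for port_network_data in ports:
    for field, field_data in port_network_data.items():
        if field_data:
            network_data[field].extend(field_data)

assert [link['id'] for link in network_data['links']] == ['eth0', 'eth1']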
diff --git a/ironic/drivers/modules/pxe_base.py b/ironic/drivers/modules/pxe_base.py
index 8632a43cc..d3cb8316e 100644
--- a/ironic/drivers/modules/pxe_base.py
+++ b/ironic/drivers/modules/pxe_base.py
@@ -200,7 +200,10 @@ class PXEBaseMixin(object):
if ramdisk_params.get("ipa-api-url"):
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
- pxe_config_template = deploy_utils.get_pxe_config_template(node)
+ if self.ipxe_enabled:
+ pxe_config_template = deploy_utils.get_ipxe_config_template(node)
+ else:
+ pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template,
diff --git a/ironic/drivers/modules/redfish/boot.py b/ironic/drivers/modules/redfish/boot.py
index 8c5bfc8a9..445ee0d57 100644
--- a/ironic/drivers/modules/redfish/boot.py
+++ b/ironic/drivers/modules/redfish/boot.py
@@ -458,7 +458,7 @@ def _parse_deploy_info(node):
def _prepare_iso_image(task, kernel_href, ramdisk_href,
bootloader_href=None, configdrive=None,
- root_uuid=None, params=None):
+ root_uuid=None, params=None, base_iso=None):
"""Prepare an ISO to boot the node.
Build bootable ISO out of `kernel_href` and `ramdisk_href` (and
@@ -486,23 +486,28 @@ def _prepare_iso_image(task, kernel_href, ramdisk_href,
value.
:raises: ImageCreationFailed, if creating ISO image failed.
"""
- if not kernel_href or not ramdisk_href:
+ if (not kernel_href or not ramdisk_href) and not base_iso:
raise exception.InvalidParameterValue(_(
- "Unable to find kernel or ramdisk for "
- "building ISO for %(node)s") %
+ "Unable to find kernel, ramdisk for "
+ "building ISO, or explicit ISO for %(node)s") %
{'node': task.node.uuid})
i_info = task.node.instance_info
+ # NOTE(TheJulia): Until we support modifying a base iso, most of
+ # this logic actually does nothing in the end. But it should!
if deploy_utils.get_boot_option(task.node) == "ramdisk":
- kernel_params = "root=/dev/ram0 text "
- kernel_params += i_info.get("ramdisk_kernel_arguments", "")
+ if not base_iso:
+ kernel_params = "root=/dev/ram0 text "
+ kernel_params += i_info.get("ramdisk_kernel_arguments", "")
+ else:
+ kernel_params = None
else:
kernel_params = i_info.get(
'kernel_append_params', CONF.redfish.kernel_append_params)
- if params:
+ if params and not base_iso:
kernel_params = ' '.join(
(kernel_params, ' '.join(
'%s=%s' % kv for kv in params.items())))
@@ -527,7 +532,11 @@ def _prepare_iso_image(task, kernel_href, ramdisk_href,
configdrive_href = configdrive
- if configdrive:
+ # FIXME(TheJulia): This is conditional on base_iso because
+ # the intent, eventually, is to support injecting the
+ # configdrive into the supplied image.
+
+ if configdrive and not base_iso:
parsed_url = urlparse.urlparse(configdrive)
if not parsed_url.scheme:
cfgdrv_blob = base64.decode_as_bytes(configdrive)
@@ -549,7 +558,8 @@ def _prepare_iso_image(task, kernel_href, ramdisk_href,
configdrive_href=configdrive_href,
root_uuid=root_uuid,
kernel_params=kernel_params,
- boot_mode=boot_mode)
+ boot_mode=boot_mode,
+ base_iso=base_iso)
iso_object_name = _get_iso_image_name(task.node)
@@ -597,6 +607,9 @@ def _prepare_deploy_iso(task, params, mode):
ramdisk_href = d_info.get('%s_ramdisk' % mode)
bootloader_href = d_info.get('bootloader')
+ # TODO(TheJulia): At some point we should support something like
+ # boot_iso for the deploy interface, perhaps when we support config
+ # injection.
prepare_iso_image = functools.partial(
_prepare_iso_image, task, kernel_href, ramdisk_href,
bootloader_href=bootloader_href, params=params)
@@ -656,8 +669,9 @@ def _prepare_boot_iso(task, root_uuid=None):
kernel_href = node.instance_info.get('kernel')
ramdisk_href = node.instance_info.get('ramdisk')
+ base_iso = node.instance_info.get('boot_iso')
- if not kernel_href or not ramdisk_href:
+ if (not kernel_href or not ramdisk_href) and not base_iso:
image_href = d_info['image_source']
@@ -671,17 +685,17 @@ def _prepare_boot_iso(task, root_uuid=None):
if not ramdisk_href:
ramdisk_href = image_properties.get('ramdisk_id')
- if not kernel_href or not ramdisk_href:
- raise exception.InvalidParameterValue(_(
- "Unable to find kernel or ramdisk for "
- "to generate boot ISO for %(node)s") %
- {'node': task.node.uuid})
+ if (not kernel_href or not ramdisk_href):
+ raise exception.InvalidParameterValue(_(
+ "Unable to find kernel or ramdisk for "
+ "to generate boot ISO for %(node)s") %
+ {'node': task.node.uuid})
bootloader_href = d_info.get('bootloader')
return _prepare_iso_image(
task, kernel_href, ramdisk_href, bootloader_href,
- root_uuid=root_uuid)
+ root_uuid=root_uuid, base_iso=base_iso)
class RedfishVirtualMediaBoot(base.BootInterface):
@@ -767,7 +781,8 @@ class RedfishVirtualMediaBoot(base.BootInterface):
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
-
+ elif d_info.get('boot_iso'):
+ props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
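With boot_iso accepted in instance_info, the validation step picks the required image properties accordingly: nothing for whole-disk images, just boot_iso when one is supplied, and the kernel/ramdisk pair otherwise. A sketch of that selection:

def required_image_props(d_info, is_whole_disk_image=False):
    # Mirrors the branch in the diff above; the non-Glance case
    # differs slightly in the real code.
    if is_whole_disk_image:
        return []
    if d_info.get('boot_iso'):
        return ['boot_iso']
    return ['kernel_id', 'ramdisk_id']

assert required_image_props({'boot_iso': 'http://host/x.iso'}) == ['boot_iso']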
diff --git a/ironic/drivers/modules/redfish/management.py b/ironic/drivers/modules/redfish/management.py
index f415b6d20..22ef03b49 100644
--- a/ironic/drivers/modules/redfish/management.py
+++ b/ironic/drivers/modules/redfish/management.py
@@ -90,7 +90,7 @@ def _set_boot_device(task, system, device, persistent=False):
try:
system.set_system_boot_options(device, enabled=enabled)
except sushy.exceptions.SushyError as e:
- if desired_enabled == sushy.BOOT_SOURCE_ENABLED_CONTINUOUS:
+ if enabled == sushy.BOOT_SOURCE_ENABLED_CONTINUOUS:
# NOTE(dtantsur): continuous boot device settings have been
# removed from Redfish, and some vendors stopped supporting
# it before an alternative was provided. As a work around,
diff --git a/ironic/objects/fields.py b/ironic/objects/fields.py
index 528e998b2..1b7778945 100644
--- a/ironic/objects/fields.py
+++ b/ironic/objects/fields.py
@@ -14,8 +14,6 @@
# under the License.
import ast
-import hashlib
-import inspect
from oslo_versionedobjects import fields as object_fields
@@ -57,10 +55,7 @@ class StringFieldThatAcceptsCallable(object_fields.StringField):
default = self._default
if (self._default != object_fields.UnspecifiedDefault
and callable(self._default)):
- default = "%s-%s" % (
- self._default.__name__,
- hashlib.md5(inspect.getsource(
- self._default).encode()).hexdigest())
+ default = '<function %s>' % default.__name__
return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__,
default, self._nullable)
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index d38f1ecd0..0392ee283 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -441,10 +441,6 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
updates = self.do_version_changes_for_db()
self._validate_property_values(updates.get('properties'))
- if 'driver' in updates and 'driver_internal_info' not in updates:
- # Clean driver_internal_info when changes driver
- self.driver_internal_info = {}
- updates = self.do_version_changes_for_db()
self._validate_and_remove_traits(updates)
self._validate_and_format_conductor_group(updates)
db_node = self.dbapi.update_node(self.uuid, updates)
diff --git a/ironic/tests/base.py b/ironic/tests/base.py
index 69c12c408..eccdb9a1b 100644
--- a/ironic/tests/base.py
+++ b/ironic/tests/base.py
@@ -16,7 +16,7 @@
"""Base classes for our unit tests.
-Allows overriding of config for use of fakes, and some black magic for
+Allows overriding of config for use of fakes, and some magic for
inline callbacks.
"""
diff --git a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
index 6f5b6e64c..b194dafdd 100644
--- a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
+++ b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
@@ -353,14 +353,15 @@ class TestPatch(BaseDeployTemplatesAPITest):
mock_save.assert_called_once_with(mock.ANY)
return response
- def _test_update_bad_request(self, mock_save, patch, error_msg):
+ def _test_update_bad_request(self, mock_save, patch, error_msg=None):
response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
patch, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
- self.assertRegex(response.json['error_message'], error_msg)
+ if error_msg:
+ self.assertRegex(response.json['error_message'], error_msg)
self.assertFalse(mock_save.called)
return response
@@ -537,16 +538,14 @@ class TestPatch(BaseDeployTemplatesAPITest):
'priority': 42
}
patch = [{'path': '/steps/1', 'op': 'replace', 'value': step}]
- self._test_update_bad_request(
- mock_save, patch, "list assignment index out of range|"
- "can't replace outside of list")
+ self._test_update_bad_request(mock_save, patch)
def test_replace_empty_step_list_fail(self, mock_save):
patch = [{'path': '/steps', 'op': 'replace', 'value': []}]
self._test_update_bad_request(
mock_save, patch, 'No deploy steps specified')
- def _test_remove_not_allowed(self, mock_save, field, error_msg):
+ def _test_remove_not_allowed(self, mock_save, field, error_msg=None):
patch = [{'path': '/%s' % field, 'op': 'remove'}]
self._test_update_bad_request(mock_save, patch, error_msg)
@@ -566,8 +565,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
"'/steps' is a mandatory attribute and can not be removed")
def test_remove_foo(self, mock_save):
- self._test_remove_not_allowed(
- mock_save, 'foo', "can't remove non-existent object 'foo'")
+ self._test_remove_not_allowed(mock_save, 'foo')
def test_replace_step_invalid_interface(self, mock_save):
patch = [{'path': '/steps/0/interface', 'op': 'replace',
@@ -632,14 +630,11 @@ class TestPatch(BaseDeployTemplatesAPITest):
def test_remove_non_existent_property_fail(self, mock_save):
patch = [{'path': '/non-existent', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch,
- "can't remove non-existent object 'non-existent'")
+ self._test_update_bad_request(mock_save, patch)
def test_remove_non_existent_step_fail(self, mock_save):
patch = [{'path': '/steps/1', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch, "can't remove non-existent object '1'")
+ self._test_update_bad_request(mock_save, patch)
def test_remove_only_step_fail(self, mock_save):
patch = [{'path': '/steps/0', 'op': 'remove'}]
@@ -648,9 +643,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
def test_remove_non_existent_step_property_fail(self, mock_save):
patch = [{'path': '/steps/0/non-existent', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch,
- "can't remove non-existent object 'non-existent'")
+ self._test_update_bad_request(mock_save, patch)
def test_add_root_non_existent(self, mock_save):
patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
@@ -665,8 +658,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
'priority': 42
}
patch = [{'path': '/steps/2', 'op': 'add', 'value': step}]
- self._test_update_bad_request(
- mock_save, patch, "can't insert outside of list")
+ self._test_update_bad_request(mock_save, patch)
def test_add_multi(self, mock_save):
steps = [
diff --git a/ironic/tests/unit/api/controllers/v1/test_expose.py b/ironic/tests/unit/api/controllers/v1/test_expose.py
index 9cb8e96ab..bc8e9fbe7 100644
--- a/ironic/tests/unit/api/controllers/v1/test_expose.py
+++ b/ironic/tests/unit/api/controllers/v1/test_expose.py
@@ -257,11 +257,11 @@ class MyThingController(pecan.rest.RestController):
@expose.expose(str)
def no_content(self):
- return atypes.Response('nothing', status_code=204)
+ return atypes.PassthruResponse('nothing', status_code=204)
@expose.expose(str)
def response_content(self):
- return atypes.Response('nothing', status_code=200)
+ return atypes.PassthruResponse('nothing', status_code=200)
@expose.expose(str)
def ouch(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_utils.py b/ironic/tests/unit/api/controllers/v1/test_utils.py
index d4c888222..381422993 100644
--- a/ironic/tests/unit/api/controllers/v1/test_utils.py
+++ b/ironic/tests/unit/api/controllers/v1/test_utils.py
@@ -15,12 +15,12 @@
# under the License.
from http import client as http_client
+import io
from unittest import mock
import os_traits
from oslo_config import cfg
from oslo_utils import uuidutils
-from webob import static
from ironic import api
from ironic.api.controllers.v1 import node as api_node
@@ -105,18 +105,15 @@ class TestApiUtils(base.TestCase):
# Raises a KeyError.
doc = {}
patch = [{"op": "remove", "path": "/foo"}]
- self.assertRaisesRegex(exception.PatchError,
- "can't remove non-existent object 'foo'",
- utils.apply_jsonpatch, doc, patch)
+ self.assertRaises(exception.PatchError,
+ utils.apply_jsonpatch, doc, patch)
def test_apply_jsonpatch_replace_non_existent_list_item(self):
# Raises an IndexError.
doc = []
patch = [{"op": "replace", "path": "/0", "value": 42}]
- self.assertRaisesRegex(exception.PatchError,
- "can't replace outside of list|"
- "list assignment index out of range",
- utils.apply_jsonpatch, doc, patch)
+ self.assertRaises(exception.PatchError,
+ utils.apply_jsonpatch, doc, patch)
def test_get_patch_values_no_path(self):
patch = [{'path': '/name', 'op': 'update', 'value': 'node-0'}]
@@ -691,9 +688,8 @@ class TestVendorPassthru(base.TestCase):
passthru_mock.assert_called_once_with(
'fake-context', 'fake-ident', 'squarepants', 'POST',
'fake-data', 'fake-topic')
- self.assertIsInstance(response, atypes.Response)
+ self.assertIsInstance(response, atypes.PassthruResponse)
self.assertEqual('SpongeBob', response.obj)
- self.assertEqual(response.return_type, atypes.Unset)
sc = http_client.ACCEPTED if async_call else http_client.OK
self.assertEqual(sc, response.status_code)
@@ -709,11 +705,10 @@ class TestVendorPassthru(base.TestCase):
def test_driver_vendor_passthru_sync(self):
self._vendor_passthru(async_call=False, driver_passthru=True)
- @mock.patch.object(api, 'response', spec_set=['app_iter'])
@mock.patch.object(api, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _test_vendor_passthru_attach(self, return_value, expct_return_value,
- mock_request, mock_response):
+ mock_request):
return_ = {'return': return_value, 'async': False, 'attach': True}
mock_request.method = 'get'
mock_request.context = 'fake-context'
@@ -726,13 +721,10 @@ class TestVendorPassthru(base.TestCase):
'fake-data', 'fake-topic')
# Assert file was attached to the response object
- self.assertIsInstance(mock_response.app_iter, static.FileIter)
- self.assertEqual(expct_return_value,
- mock_response.app_iter.file.read())
+ self.assertIsInstance(response.obj, io.BytesIO)
+ self.assertEqual(expct_return_value, response.obj.read())
# Assert response message is none
- self.assertIsInstance(response, atypes.Response)
- self.assertIsNone(response.obj)
- self.assertIsNone(response.return_type)
+ self.assertIsInstance(response, atypes.PassthruResponse)
self.assertEqual(http_client.OK, response.status_code)
def test_vendor_passthru_attach(self):
diff --git a/ironic/tests/unit/api/test_args.py b/ironic/tests/unit/api/test_args.py
new file mode 100644
index 000000000..549c2efe1
--- /dev/null
+++ b/ironic/tests/unit/api/test_args.py
@@ -0,0 +1,506 @@
+# Copyright 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import decimal
+import io
+
+from webob import multidict
+
+from ironic.api import args
+from ironic.api.controllers.v1 import types
+from ironic.api import functions
+from ironic.api import types as atypes
+from ironic.common import exception
+from ironic.tests import base as test_base
+
+
+class Obj(atypes.Base):
+
+ id = atypes.wsattr(int, mandatory=True)
+ name = str
+ readonly_field = atypes.wsattr(str, readonly=True)
+ default_field = atypes.wsattr(str, default='foo')
+ unset_me = str
+
+
+class NestedObj(atypes.Base):
+ o = Obj
+
+
+class TestArgs(test_base.TestCase):
+
+ def test_fromjson_array(self):
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.fromjson_array(atype, [0, '1', '1_234', None])
+ )
+ self.assertRaises(ValueError, args.fromjson_array,
+ atype, ['one', 'two', 'three'])
+ self.assertRaises(ValueError, args.fromjson_array,
+ atype, 'one')
+
+ def test_fromjson_dict(self):
+ dtype = atypes.DictType(str, int)
+ self.assertEqual({
+ 'zero': 0,
+ 'one': 1,
+ 'etc': 1234,
+ 'none': None
+ }, args.fromjson_dict(dtype, {
+ 'zero': 0,
+ 'one': '1',
+ 'etc': '1_234',
+ 'none': None
+ }))
+
+ self.assertRaises(ValueError, args.fromjson_dict,
+ dtype, [])
+ self.assertRaises(ValueError, args.fromjson_dict,
+ dtype, {'one': 'one'})
+
+ def test_fromjson_bool(self):
+ for b in (1, 2, True, 'true', 't', 'yes', 'y', 'on', '1'):
+ self.assertTrue(args.fromjson_bool(b))
+ for b in (0, False, 'false', 'f', 'no', 'n', 'off', '0'):
+ self.assertFalse(args.fromjson_bool(b))
+ for b in ('yup', 'yeet', 'NOPE', 3.14):
+ self.assertRaises(ValueError, args.fromjson_bool, b)
+
+ def test_fromjson(self):
+ # parse None
+ self.assertIsNone(args.fromjson(None, None))
+
+ # parse array
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.fromjson(atype, [0, '1', '1_234', None])
+ )
+
+ # parse dict
+ dtype = atypes.DictType(str, int)
+ self.assertEqual({
+ 'zero': 0,
+ 'one': 1,
+ 'etc': 1234,
+ 'none': None
+ }, args.fromjson(dtype, {
+ 'zero': 0,
+ 'one': '1',
+ 'etc': '1_234',
+ 'none': None
+ }))
+
+ # parse bytes
+ self.assertEqual(
+ b'asdf',
+ args.fromjson(bytes, b'asdf')
+ )
+ self.assertEqual(
+ b'asdf',
+ args.fromjson(bytes, 'asdf')
+ )
+ self.assertEqual(
+ b'33',
+ args.fromjson(bytes, 33)
+ )
+ self.assertEqual(
+ b'3.14',
+ args.fromjson(bytes, 3.14)
+ )
+
+ # parse str
+ self.assertEqual(
+ 'asdf',
+ args.fromjson(str, b'asdf')
+ )
+ self.assertEqual(
+ 'asdf',
+ args.fromjson(str, 'asdf')
+ )
+
+ # parse int/float
+ self.assertEqual(
+ 3,
+ args.fromjson(int, '3')
+ )
+ self.assertEqual(
+ 3,
+ args.fromjson(int, 3)
+ )
+ self.assertEqual(
+ 3.14,
+ args.fromjson(float, 3.14)
+ )
+
+ # parse bool
+ self.assertFalse(args.fromjson(bool, 'no'))
+ self.assertTrue(args.fromjson(bool, 'yes'))
+
+ # parse decimal
+ self.assertEqual(
+ decimal.Decimal(3.14),
+ args.fromjson(decimal.Decimal, 3.14)
+ )
+
+ # parse datetime
+ expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
+ self.assertEqual(
+ expected,
+ args.fromjson(datetime.datetime, '2015-08-13T11:38:09.496475')
+ )
+
+ # parse complex
+ n = args.fromjson(NestedObj, {'o': {
+ 'id': 1234,
+ 'name': 'an object'
+ }})
+ self.assertIsInstance(n.o, Obj)
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+ self.assertEqual('foo', n.o.default_field)
+
+ # parse usertype
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.fromjson(types.listtype, '0,1, 2, three')
+ )
+
+ def test_fromjson_complex(self):
+ n = args.fromjson_complex(NestedObj, {'o': {
+ 'id': 1234,
+ 'name': 'an object'
+ }})
+ self.assertIsInstance(n.o, Obj)
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+ self.assertEqual('foo', n.o.default_field)
+
+ e = self.assertRaises(exception.UnknownAttribute,
+ args.fromjson_complex,
+ Obj, {'ooo': {}})
+ self.assertEqual({'ooo'}, e.attributes)
+
+ e = self.assertRaises(exception.InvalidInput, args.fromjson_complex,
+ Obj,
+ {'name': 'an object'})
+ self.assertEqual('id', e.fieldname)
+ self.assertEqual('Mandatory field missing.', e.msg)
+
+ e = self.assertRaises(exception.InvalidInput, args.fromjson_complex,
+ Obj,
+ {'id': 1234, 'readonly_field': 'foo'})
+ self.assertEqual('readonly_field', e.fieldname)
+ self.assertEqual('Cannot set read only field.', e.msg)
+
+ def test_parse(self):
+ # source as bytes
+ s = b'{"o": {"id": 1234, "name": "an object"}}'
+
+ # test bodyarg=True
+ n = args.parse(s, {"o": NestedObj}, True)['o']
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+
+ # source as file
+ s = io.StringIO('{"o": {"id": 1234, "name": "an object"}}')
+
+ # test bodyarg=False
+ n = args.parse(s, {"o": Obj}, False)['o']
+ self.assertEqual(1234, n.id)
+ self.assertEqual('an object', n.name)
+
+ # fromjson ValueError
+ s = '{"o": ["id", "name"]}'
+ self.assertRaises(exception.InvalidInput, args.parse,
+ s, {"o": atypes.DictType(str, str)}, False)
+ s = '["id", "name"]'
+ self.assertRaises(exception.InvalidInput, args.parse,
+ s, {"o": atypes.DictType(str, str)}, True)
+
+ # fromjson UnknownAttribute
+ s = '{"o": {"foo": "bar", "id": 1234, "name": "an object"}}'
+ self.assertRaises(exception.UnknownAttribute, args.parse,
+ s, {"o": NestedObj}, True)
+ self.assertRaises(exception.UnknownAttribute, args.parse,
+ s, {"o": Obj}, False)
+
+ # invalid json
+ s = '{Sunn O)))}'
+ self.assertRaises(exception.ClientSideError, args.parse,
+ s, {"o": Obj}, False)
+
+ # extra args
+ s = '{"foo": "bar", "o": {"id": 1234, "name": "an object"}}'
+ self.assertRaises(exception.UnknownArgument, args.parse,
+ s, {"o": Obj}, False)
+
+ def test_from_param(self):
+ # datetime param
+ expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
+ self.assertEqual(
+ expected,
+ args.from_param(datetime.datetime, '2015-08-13T11:38:09.496475')
+ )
+ self.assertIsNone(args.from_param(datetime.datetime, None))
+
+ # usertype param
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.from_param(types.listtype, '0,1, 2, three')
+ )
+
+ # array param
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.from_param(atype, [0, '1', '1_234', None])
+ )
+ self.assertIsNone(args.from_param(atype, None))
+
+ # string param
+ self.assertEqual('foo', args.from_param(str, 'foo'))
+ self.assertIsNone(args.from_param(str, None))
+
+ # string param with from_params
+ hit_paths = set()
+ params = multidict.MultiDict(
+ foo='bar',
+ )
+ self.assertEqual(
+ 'bar',
+ args.from_params(str, params, 'foo', hit_paths)
+ )
+ self.assertEqual({'foo'}, hit_paths)
+
+ def test_array_from_params(self):
+ hit_paths = set()
+ datatype = atypes.ArrayType(str)
+ params = multidict.MultiDict(
+ foo='bar',
+ one='two'
+ )
+ self.assertEqual(
+ ['bar'],
+ args.from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual({'foo'}, hit_paths)
+ self.assertEqual(
+ ['two'],
+ args.array_from_params(datatype, params, 'one', hit_paths)
+ )
+ self.assertEqual({'foo', 'one'}, hit_paths)
+
+ def test_usertype_from_params(self):
+ hit_paths = set()
+ datatype = types.listtype
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ )
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.usertype_from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual(
+ atypes.Unset,
+ args.usertype_from_params(datatype, params, 'bar', hit_paths)
+ )
+
+ def test_args_from_args(self):
+
+ fromargs = ['one', 2, [0, '1', '2_34']]
+ fromkwargs = {'foo': '1, 2, 3'}
+
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype)
+ def myfunc(self, first, second, third, foo):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+
+ newargs, newkwargs = args.args_from_args(funcdef, fromargs, fromkwargs)
+ self.assertEqual(['one', 2, [0, 1, 234]], newargs)
+ self.assertEqual({'foo': ['1', '2', '3']}, newkwargs)
+
+ def test_args_from_params(self):
+
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype)
+ def myfunc(self, first, second, third, foo):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ third='1',
+ second='2'
+ )
+ self.assertEqual(
+ ([], {'foo': ['0', '1', '2', 'three'], 'second': 2, 'third': [1]}),
+ args.args_from_params(funcdef, params)
+ )
+
+ # unexpected param
+ params = multidict.MultiDict(bar='baz')
+ self.assertRaises(exception.UnknownArgument, args.args_from_params,
+ funcdef, params)
+
+ # no params plus a body
+ params = multidict.MultiDict(__body__='')
+ self.assertEqual(
+ ([], {}),
+ args.args_from_params(funcdef, params)
+ )
+
+ def test_args_from_body(self):
+ @functions.signature(str, body=NestedObj)
+ def myfunc(self, nested):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ mimetype = 'application/json'
+ body = b'{"o": {"id": 1234, "name": "an object"}}'
+ newargs, newkwargs = args.args_from_body(funcdef, body, mimetype)
+
+ self.assertEqual(1234, newkwargs['nested'].o.id)
+ self.assertEqual('an object', newkwargs['nested'].o.name)
+
+ self.assertEqual(
+ ((), {}),
+ args.args_from_body(funcdef, None, mimetype)
+ )
+
+ self.assertRaises(exception.ClientSideError, args.args_from_body,
+ funcdef, body, 'application/x-corba')
+
+ self.assertEqual(
+ ((), {}),
+ args.args_from_body(funcdef, body,
+ 'application/x-www-form-urlencoded')
+ )
+
+ def test_combine_args(self):
+
+ @functions.signature(str, str, int)
+ def myfunc(self, first, second,):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+
+ # empty
+ self.assertEqual(
+ ([], {}),
+ args.combine_args(
+ funcdef, (
+ ([], {}),
+ ([], {}),
+ )
+ )
+ )
+
+ # combine kwargs
+ self.assertEqual(
+ ([], {'first': 'one', 'second': 'two'}),
+ args.combine_args(
+ funcdef, (
+ ([], {}),
+ ([], {'first': 'one', 'second': 'two'}),
+ )
+ )
+ )
+
+ # combine mixed args
+ self.assertEqual(
+ ([], {'first': 'one', 'second': 'two'}),
+ args.combine_args(
+ funcdef, (
+ (['one'], {}),
+ ([], {'second': 'two'}),
+ )
+ )
+ )
+
+ # override kwargs
+ self.assertEqual(
+ ([], {'first': 'two'}),
+ args.combine_args(
+ funcdef, (
+ ([], {'first': 'one'}),
+ ([], {'first': 'two'}),
+ ),
+ allow_override=True
+ )
+ )
+
+ # override args
+ self.assertEqual(
+ ([], {'first': 'two', 'second': 'three'}),
+ args.combine_args(
+ funcdef, (
+ (['one', 'three'], {}),
+ (['two'], {}),
+ ),
+ allow_override=True
+ )
+ )
+
+ # can't override args
+ self.assertRaises(exception.ClientSideError, args.combine_args,
+ funcdef,
+ ((['one'], {}), (['two'], {})))
+
+ # can't override kwargs
+ self.assertRaises(exception.ClientSideError, args.combine_args,
+ funcdef,
+ (([], {'first': 'one'}), ([], {'first': 'two'})))
+
+ def test_get_args(self):
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype, body=NestedObj)
+ def myfunc(self, first, second, third, foo, nested):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ second='2'
+ )
+ mimetype = 'application/json'
+ body = b'{"o": {"id": 1234, "name": "an object"}}'
+ fromargs = ['one']
+ fromkwargs = {'third': '1'}
+
+ newargs, newkwargs = args.get_args(funcdef, fromargs, fromkwargs,
+ params, body, mimetype)
+ self.assertEqual([], newargs)
+ n = newkwargs.pop('nested')
+ self.assertEqual({
+ 'first': 'one',
+ 'foo': ['0', '1', '2', 'three'],
+ 'second': 2,
+ 'third': [1]},
+ newkwargs
+ )
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+
+ # check_arguments missing mandatory argument 'second'
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ )
+ self.assertRaises(exception.MissingArgument, args.get_args,
+ funcdef, fromargs, fromkwargs,
+ params, body, mimetype)
diff --git a/ironic/tests/unit/api/test_hooks.py b/ironic/tests/unit/api/test_hooks.py
index 5f974d161..f5e8db3b6 100644
--- a/ironic/tests/unit/api/test_hooks.py
+++ b/ironic/tests/unit/api/test_hooks.py
@@ -103,7 +103,7 @@ class TestNoExceptionTracebackHook(base.BaseApiTest):
def setUp(self):
super(TestNoExceptionTracebackHook, self).setUp()
- p = mock.patch.object(root.Root, 'convert')
+ p = mock.patch.object(root, 'root')
self.root_convert_mock = p.start()
self.addCleanup(p.stop)
diff --git a/ironic/tests/unit/api/test_types.py b/ironic/tests/unit/api/test_types.py
new file mode 100644
index 000000000..fb39ff2b7
--- /dev/null
+++ b/ironic/tests/unit/api/test_types.py
@@ -0,0 +1,566 @@
+# coding: utf-8
+#
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
+#
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from ironic.api import types
+from ironic.common import exception as exc
+from ironic.tests import base as test_base
+
+
+def gen_class():
+ d = {}
+ exec('''class tmp(object): pass''', d)
+ return d['tmp']
+
+
+class TestTypes(test_base.TestCase):
+ def setUp(self):
+ super(TestTypes, self).setUp()
+ types.registry = types.Registry()
+
+ def test_default_usertype(self):
+ class MyType(types.UserType):
+ basetype = str
+
+ My = MyType()
+
+ assert My.validate('a') == 'a'
+ assert My.tobasetype('a') == 'a'
+ assert My.frombasetype('a') == 'a'
+
+ def test_unset(self):
+ u = types.Unset
+
+ assert not u
+
+ def test_flat_type(self):
+ class Flat(object):
+ aint = int
+ abytes = bytes
+ atext = str
+ afloat = float
+
+ types.register_type(Flat)
+
+ assert len(Flat._wsme_attributes) == 4
+ attrs = Flat._wsme_attributes
+ print(attrs)
+
+ assert attrs[0].key == 'aint'
+ assert attrs[0].name == 'aint'
+ assert isinstance(attrs[0], types.wsattr)
+ assert attrs[0].datatype == int
+ assert attrs[0].mandatory is False
+ assert attrs[1].key == 'abytes'
+ assert attrs[1].name == 'abytes'
+ assert attrs[2].key == 'atext'
+ assert attrs[2].name == 'atext'
+ assert attrs[3].key == 'afloat'
+ assert attrs[3].name == 'afloat'
+
+ def test_private_attr(self):
+ class WithPrivateAttrs(object):
+ _private = 12
+
+ types.register_type(WithPrivateAttrs)
+
+ assert len(WithPrivateAttrs._wsme_attributes) == 0
+
+ def test_attribute_order(self):
+ class ForcedOrder(object):
+ _wsme_attr_order = ('a2', 'a1', 'a3')
+ a1 = int
+ a2 = int
+ a3 = int
+
+ types.register_type(ForcedOrder)
+
+ print(ForcedOrder._wsme_attributes)
+ assert ForcedOrder._wsme_attributes[0].key == 'a2'
+ assert ForcedOrder._wsme_attributes[1].key == 'a1'
+ assert ForcedOrder._wsme_attributes[2].key == 'a3'
+
+ c = gen_class()
+ print(c)
+ types.register_type(c)
+ del c._wsme_attributes
+
+ c.a2 = int
+ c.a1 = int
+ c.a3 = int
+
+ types.register_type(c)
+
+ assert c._wsme_attributes[0].key == 'a1', c._wsme_attributes[0].key
+ assert c._wsme_attributes[1].key == 'a2'
+ assert c._wsme_attributes[2].key == 'a3'
+
+ def test_wsproperty(self):
+ class WithWSProp(object):
+ def __init__(self):
+ self._aint = 0
+
+ def get_aint(self):
+ return self._aint
+
+ def set_aint(self, value):
+ self._aint = value
+
+ aint = types.wsproperty(int, get_aint, set_aint, mandatory=True)
+
+ types.register_type(WithWSProp)
+
+ print(WithWSProp._wsme_attributes)
+ assert len(WithWSProp._wsme_attributes) == 1
+ a = WithWSProp._wsme_attributes[0]
+ assert a.key == 'aint'
+ assert a.datatype == int
+ assert a.mandatory
+
+ o = WithWSProp()
+ o.aint = 12
+
+ assert o.aint == 12
+
+ def test_nested(self):
+ class Inner(object):
+ aint = int
+
+ class Outer(object):
+ inner = Inner
+
+ types.register_type(Outer)
+
+ assert hasattr(Inner, '_wsme_attributes')
+ assert len(Inner._wsme_attributes) == 1
+
+ def test_inspect_with_inheritance(self):
+ class Parent(object):
+ parent_attribute = int
+
+ class Child(Parent):
+ child_attribute = int
+
+ types.register_type(Parent)
+ types.register_type(Child)
+
+ assert len(Child._wsme_attributes) == 2
+
+ def test_selfreftype(self):
+ class SelfRefType(object):
+ pass
+
+ SelfRefType.parent = SelfRefType
+
+ types.register_type(SelfRefType)
+
+ def test_inspect_with_property(self):
+ class AType(object):
+ @property
+ def test(self):
+ return 'test'
+
+ types.register_type(AType)
+
+ assert len(AType._wsme_attributes) == 0
+ assert AType().test == 'test'
+
+ def test_enum(self):
+ aenum = types.Enum(str, 'v1', 'v2')
+ assert aenum.basetype is str
+
+ class AType(object):
+ a = aenum
+
+ types.register_type(AType)
+
+ assert AType.a.datatype is aenum
+
+ obj = AType()
+ obj.a = 'v1'
+ assert obj.a == 'v1', repr(obj.a)
+
+ self.assertRaisesRegex(exc.InvalidInput,
+ "Invalid input for field/attribute a. \
+Value: 'v3'. Value should be one of: v., v.",
+ setattr,
+ obj,
+ 'a',
+ 'v3')
+
+ def test_attribute_validation(self):
+ class AType(object):
+ alist = [int]
+ aint = int
+
+ types.register_type(AType)
+
+ obj = AType()
+
+ obj.alist = [1, 2, 3]
+ assert obj.alist == [1, 2, 3]
+ obj.aint = 5
+ assert obj.aint == 5
+
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', 12)
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', [2, 'a'])
+
+ def test_attribute_validation_minimum(self):
+ class ATypeInt(object):
+ attr = types.IntegerType(minimum=1, maximum=5)
+
+ types.register_type(ATypeInt)
+
+ obj = ATypeInt()
+ obj.attr = 2
+
+ # comparison between the 'zero' value and integer minimum (1) raises
+ # a TypeError which must be wrapped into an InvalidInput exception
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'attr', 'zero')
+
+ def test_text_attribute_conversion(self):
+ class SType(object):
+ atext = str
+ abytes = bytes
+
+ types.register_type(SType)
+
+ obj = SType()
+
+ obj.atext = b'somebytes'
+ assert obj.atext == 'somebytes'
+ assert isinstance(obj.atext, str)
+
+ obj.abytes = 'sometext'
+ assert obj.abytes == b'sometext'
+ assert isinstance(obj.abytes, bytes)
+
+ def test_named_attribute(self):
+ class ABCDType(object):
+ a_list = types.wsattr([int], name='a.list')
+ astr = str
+
+ types.register_type(ABCDType)
+
+ assert len(ABCDType._wsme_attributes) == 2
+ attrs = ABCDType._wsme_attributes
+
+ assert attrs[0].key == 'a_list', attrs[0].key
+ assert attrs[0].name == 'a.list', attrs[0].name
+ assert attrs[1].key == 'astr', attrs[1].key
+ assert attrs[1].name == 'astr', attrs[1].name
+
+ def test_wsattr_del(self):
+ class MyType(object):
+ a = types.wsattr(int)
+
+ types.register_type(MyType)
+
+ value = MyType()
+
+ value.a = 5
+ assert value.a == 5
+ del value.a
+ assert value.a is types.Unset
+
+ def test_validate_dict(self):
+ assert types.validate_value({int: str}, {1: '1', 5: '5'})
+
+ self.assertRaises(ValueError, types.validate_value,
+ {int: str}, [])
+
+ assert types.validate_value({int: str}, {'1': '1', 5: '5'})
+
+ self.assertRaises(ValueError, types.validate_value,
+ {int: str}, {1: 1, 5: '5'})
+
+ def test_validate_list_valid(self):
+ assert types.validate_value([int], [1, 2])
+ assert types.validate_value([int], ['5'])
+
+ def test_validate_list_empty(self):
+ assert types.validate_value([int], []) == []
+
+ def test_validate_list_none(self):
+ v = types.ArrayType(int)
+ assert v.validate(None) is None
+
+ def test_validate_list_invalid_member(self):
+ self.assertRaises(ValueError, types.validate_value, [int],
+ ['not-a-number'])
+
+ def test_validate_list_invalid_type(self):
+ self.assertRaises(ValueError, types.validate_value, [int], 1)
+
+ def test_validate_float(self):
+ self.assertEqual(types.validate_value(float, 1), 1.0)
+ self.assertEqual(types.validate_value(float, '1'), 1.0)
+ self.assertEqual(types.validate_value(float, 1.1), 1.1)
+ self.assertRaises(ValueError, types.validate_value, float, [])
+ self.assertRaises(ValueError, types.validate_value, float,
+ 'not-a-float')
+
+ def test_validate_int(self):
+ self.assertEqual(types.validate_value(int, 1), 1)
+ self.assertEqual(types.validate_value(int, '1'), 1)
+ self.assertRaises(ValueError, types.validate_value, int, 1.1)
+
+ def test_validate_integer_type(self):
+ v = types.IntegerType(minimum=1, maximum=10)
+ v.validate(1)
+ v.validate(5)
+ v.validate(10)
+ self.assertRaises(ValueError, v.validate, 0)
+ self.assertRaises(ValueError, v.validate, 11)
+
+ def test_validate_string_type(self):
+ v = types.StringType(min_length=1, max_length=10,
+ pattern='^[a-zA-Z0-9]*$')
+ v.validate('1')
+ v.validate('12345')
+ v.validate('1234567890')
+ self.assertRaises(ValueError, v.validate, '')
+ self.assertRaises(ValueError, v.validate, '12345678901')
+
+ # Test a pattern validation
+ v.validate('a')
+ v.validate('A')
+ self.assertRaises(ValueError, v.validate, '_')
+
+ def test_validate_string_type_precompile(self):
+ precompile = re.compile('^[a-zA-Z0-9]*$')
+ v = types.StringType(min_length=1, max_length=10,
+ pattern=precompile)
+
+ # Test a pattern validation
+ v.validate('a')
+ v.validate('A')
+ self.assertRaises(ValueError, v.validate, '_')
+
+ def test_validate_string_type_pattern_exception_message(self):
+ regex = '^[a-zA-Z0-9]*$'
+ v = types.StringType(pattern=regex)
+ try:
+ v.validate('_')
+ self.fail()
+ except ValueError as e:
+ self.assertIn(regex, str(e))
+
+ def test_register_invalid_array(self):
+ self.assertRaises(ValueError, types.register_type, [])
+ self.assertRaises(ValueError, types.register_type, [int, str])
+ self.assertRaises(AttributeError, types.register_type, [1])
+
+ def test_register_invalid_dict(self):
+ self.assertRaises(ValueError, types.register_type, {})
+ self.assertRaises(ValueError, types.register_type,
+ {int: str, str: int})
+ self.assertRaises(ValueError, types.register_type,
+ {types.Unset: str})
+
+ def test_list_attribute_no_auto_register(self):
+ class MyType(object):
+ aint = int
+
+ assert not hasattr(MyType, '_wsme_attributes')
+
+ self.assertRaises(TypeError, types.list_attributes, MyType)
+
+ assert not hasattr(MyType, '_wsme_attributes')
+
+ def test_list_of_complextypes(self):
+ class A(object):
+ bs = types.wsattr(['B'])
+
+ class B(object):
+ i = int
+
+ types.register_type(A)
+ types.register_type(B)
+
+ assert A.bs.datatype.item_type is B
+
+ def test_cross_referenced_types(self):
+ class A(object):
+ b = types.wsattr('B')
+
+ class B(object):
+ a = A
+
+ types.register_type(A)
+ types.register_type(B)
+
+ assert A.b.datatype is B
+
+ def test_base(self):
+ class B1(types.Base):
+ b2 = types.wsattr('B2')
+
+ class B2(types.Base):
+ b2 = types.wsattr('B2')
+
+ assert B1.b2.datatype is B2, repr(B1.b2.datatype)
+ assert B2.b2.datatype is B2
+
+ def test_base_init(self):
+ class C1(types.Base):
+ s = str
+
+ c = C1(s='test')
+ assert c.s == 'test'
+
+ def test_array_eq(self):
+ ell = [types.ArrayType(str)]
+ assert types.ArrayType(str) in ell
+
+ def test_array_sample(self):
+ s = types.ArrayType(str).sample()
+ assert isinstance(s, list)
+ assert s
+ assert s[0] == ''
+
+ def test_dict_sample(self):
+ s = types.DictType(str, str).sample()
+ assert isinstance(s, dict)
+ assert s
+ assert s == {'': ''}
+
+ def test_binary_to_base(self):
+ import base64
+ assert types.binary.tobasetype(None) is None
+ # encodestring() was removed in Python 3.9; encodebytes() is the
+ # supported equivalent.
+ expected = base64.encodebytes(b'abcdef')
+ assert types.binary.tobasetype(b'abcdef') == expected
+
+ def test_binary_from_base(self):
+ import base64
+ assert types.binary.frombasetype(None) is None
+ encoded = base64.encodebytes(b'abcdef')
+ assert types.binary.frombasetype(encoded) == b'abcdef'
+
+ def test_wsattr_weakref_datatype(self):
+ # If the datatype inside the wsattr ends up a weakref, it
+ # should be converted to the real type when accessed again by
+ # the property getter.
+ import weakref
+ a = types.wsattr(int)
+ a.datatype = weakref.ref(int)
+ assert a.datatype is int
+
+ def test_wsattr_list_datatype(self):
+ # If the datatype inside the wsattr ends up a list of weakrefs
+ # to types, it should be converted to the real types when
+ # accessed again by the property getter.
+ import weakref
+ a = types.wsattr(int)
+ a.datatype = [weakref.ref(int)]
+ assert isinstance(a.datatype, list)
+ assert a.datatype[0] is int
+
+ def test_unregister(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry._unregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIsNone(after)
+
+ def test_unregister_twice(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry._unregister(TempType)
+ # Second call should not raise an exception
+ types.registry._unregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIsNone(after)
+
+ def test_unregister_array_type(self):
+ class TempType(object):
+ pass
+ t = [TempType]
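+ # Registering a one-item list should record a matching ArrayType
+ # in registry.array_types.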
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.array_types, set())
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.array_types, set())
+
+ def test_unregister_array_type_twice(self):
+ class TempType(object):
+ pass
+ t = [TempType]
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.array_types, set())
+ types.registry._unregister(t)
+ # Second call should not raise an exception
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.array_types, set())
+
+ def test_unregister_dict_type(self):
+ class TempType(object):
+ pass
+ t = {str: TempType}
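+ # Registering a one-item dict should record a matching DictType
+ # in registry.dict_types.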
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.dict_types, set())
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.dict_types, set())
+
+ def test_unregister_dict_type_twice(self):
+ class TempType(object):
+ pass
+ t = {str: TempType}
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.dict_types, set())
+ types.registry._unregister(t)
+ # Second call should not raise an exception
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.dict_types, set())
+
+ def test_reregister(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry.reregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIs(after, TempType)
+
+ def test_reregister_and_add_attr(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ attrs = types.list_attributes(TempType)
+ self.assertEqual(attrs, [])
+ TempType.one = str
+ types.registry.reregister(TempType)
+ after = types.list_attributes(TempType)
+ self.assertNotEqual(after, [])
+
+ def test_non_registered_complex_type(self):
+ class TempType(types.Base):
+ __registry__ = None
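+ # Setting __registry__ to None opts the subclass out of Base's
+ # auto-registration, so it is not complex until registered.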
+
+ self.assertFalse(types.iscomplex(TempType))
+ types.registry.register(TempType)
+ self.assertTrue(types.iscomplex(TempType))
diff --git a/ironic/tests/unit/common/json_samples/neutron_network_show.json b/ironic/tests/unit/common/json_samples/neutron_network_show.json
new file mode 100644
index 000000000..7c54850ca
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_network_show.json
@@ -0,0 +1,33 @@
+{
+ "network": {
+ "admin_state_up": true,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "ipv4_address_scope": null,
+ "ipv6_address_scope": null,
+ "l2_adjacency": false,
+ "mtu": 1500,
+ "name": "private-network",
+ "port_security_enabled": true,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": false,
+ "shared": true,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tags": ["tag1,tag2"],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": false,
+ "description": "",
+ "is_default": true
+ }
+}
\ No newline at end of file
diff --git a/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json
new file mode 100644
index 000000000..eb955e3b5
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json
@@ -0,0 +1,33 @@
+{
+ "network": {
+ "admin_state_up": true,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "ipv4_address_scope": null,
+ "ipv6_address_scope": null,
+ "l2_adjacency": false,
+ "mtu": 1500,
+ "name": "private-network",
+ "port_security_enabled": true,
+ "project_id": "5199666e520f4aed823710aec37cfd38",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": false,
+ "shared": true,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tags": ["tag1,tag2"],
+ "tenant_id": "5199666e520f4aed823710aec37cfd38",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": false,
+ "description": "",
+ "is_default": true
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_port_show.json b/ironic/tests/unit/common/json_samples/neutron_port_show.json
new file mode 100644
index 000000000..925f00fd0
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_port_show.json
@@ -0,0 +1,59 @@
+{
+ "port": {
+ "admin_state_up": true,
+ "allowed_address_pairs": [],
+ "binding:host_id": "devstack",
+ "binding:profile": {},
+ "binding:vif_details": {
+ "ovs_hybrid_plug": true,
+ "port_filter": true
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "created_at": "2016-03-08T20:19:41",
+ "data_plane_status": "ACTIVE",
+ "description": "",
+ "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e",
+ "device_owner": "network:router_interface",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "10.0.0.2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 4,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "10.0.0.2",
+ "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2"
+ }
+ ],
+ "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:23:fd:d7",
+ "mac_learning_enabled": false,
+ "name": "",
+ "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+ "port_security_enabled": false,
+ "project_id": "7e02058126cc4950b75f9970368ba177",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "7e02058126cc4950b75f9970368ba177",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "resource_request": {
+ "required": ["CUSTOM_PHYSNET_PUBLIC", "CUSTOM_VNIC_TYPE_NORMAL"],
+ "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000}
+ },
+ "uplink_status_propagation": false
+ }
+}
\ No newline at end of file
diff --git a/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json
new file mode 100644
index 000000000..1dd3ead68
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json
@@ -0,0 +1,59 @@
+{
+ "port": {
+ "admin_state_up": true,
+ "allowed_address_pairs": [],
+ "binding:host_id": "devstack",
+ "binding:profile": {},
+ "binding:vif_details": {
+ "ovs_hybrid_plug": true,
+ "port_filter": true
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "created_at": "2016-03-08T20:19:41",
+ "data_plane_status": "ACTIVE",
+ "description": "",
+ "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e",
+ "device_owner": "network:router_interface",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "fd00:203:0:113::2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 6,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "fd00:203:0:113::2",
+ "subnet_id": "906e685a-b964-4d58-9939-9cf3af197c67"
+ }
+ ],
+ "id": "96d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb8",
+ "ip_allocation": "immediate",
+ "mac_address": "52:54:00:4f:ef:b7",
+ "mac_learning_enabled": false,
+ "name": "",
+ "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+ "port_security_enabled": false,
+ "project_id": "7e02058126cc4950b75f9970368ba177",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "7e02058126cc4950b75f9970368ba177",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "resource_request": {
+ "required": ["CUSTOM_PHYSNET_PUBLIC", "CUSTOM_VNIC_TYPE_NORMAL"],
+ "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000}
+ },
+ "uplink_status_propagation": false
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_subnet_show.json b/ironic/tests/unit/common/json_samples/neutron_subnet_show.json
new file mode 100644
index 000000000..f1b7ae5a5
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_subnet_show.json
@@ -0,0 +1,32 @@
+{
+ "subnet": {
+ "name": "private-subnet",
+ "enable_dhcp": true,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "segment_id": null,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "dns_publish_fixed_ip": false,
+ "allocation_pools": [
+ {
+ "start": "10.0.0.2",
+ "end": "10.0.0.254"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "description": "",
+ "ipv6_address_mode": null,
+ "ipv6_ra_mode": null,
+ "revision_number": 2,
+ "service_types": [],
+ "subnetpool_id": null,
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json
new file mode 100644
index 000000000..e5bd1e496
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json
@@ -0,0 +1,32 @@
+{
+ "subnet": {
+ "name": "private-subnet",
+ "enable_dhcp": true,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "segment_id": null,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "dns_publish_fixed_ip": false,
+ "allocation_pools": [
+ {
+ "start": "fd00:203:0:113::2",
+ "end": "fd00:203:0:113:ffff:ffff:ffff:ffff"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 6,
+ "gateway_ip": "fd00:203:0:113::1",
+ "cidr": "fd00:203:0:113::/64",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "description": "",
+ "ipv6_address_mode": "slaac",
+ "ipv6_ra_mode": null,
+ "revision_number": 2,
+ "service_types": [],
+ "subnetpool_id": null,
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ }
+}
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index 437bb41ef..8a90ab55b 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -911,6 +911,27 @@ class FsImageTestCase(base.TestCase):
'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid',
configdrive='tmpdir/configdrive', kernel_params=params)
+ @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ def test_create_boot_iso_for_existing_iso(self, tempdir_mock,
+ fetch_images_mock,
+ create_isolinux_mock):
+ mock_file_handle = mock.MagicMock(spec=io.BytesIO)
+ mock_file_handle.__enter__.return_value = 'tmpdir'
+ tempdir_mock.return_value = mock_file_handle
+ base_iso = 'http://fake.local:1234/fake.iso'
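+ # With base_iso set, create_boot_iso should fetch the ISO directly
+ # and skip isolinux image generation.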
+ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid',
+ 'ramdisk-uuid', 'deploy_iso-uuid',
+ 'efiboot-uuid', None,
+ None, None, 'http://configdrive',
+ base_iso=base_iso)
+
+ fetch_images_mock.assert_any_call(
+ 'ctx', base_iso, 'output_file')
+
+ create_isolinux_mock.assert_not_called()
+
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_get_glance_image_properties_no_such_prop(self,
image_service_mock):
diff --git a/ironic/tests/unit/common/test_json_rpc.py b/ironic/tests/unit/common/test_json_rpc.py
index e76500215..217a1128c 100644
--- a/ironic/tests/unit/common/test_json_rpc.py
+++ b/ironic/tests/unit/common/test_json_rpc.py
@@ -266,7 +266,7 @@ class TestService(test_base.TestCase):
'code': -32601,
})
- def test_no_blacklisted_methods(self):
+ def test_no_deny_methods(self):
for name in ('__init__', '_private', 'init_host', 'value'):
body = self._request(name, {'context': self.ctx})
self._check(body,
diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py
index 02c989331..d290aaa61 100644
--- a/ironic/tests/unit/common/test_neutron.py
+++ b/ironic/tests/unit/common/test_neutron.py
@@ -11,6 +11,8 @@
# under the License.
import copy
+import json
+import os
import time
from unittest import mock
@@ -270,6 +272,30 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
patcher.start()
self.addCleanup(patcher.stop)
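+ # Feed the mocked Neutron client realistic show_* responses loaded
+ # from the JSON sample files.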
+ port_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_port_show.json')
+ with open(port_show_file, 'rb') as fl:
+ self.port_data = json.load(fl)
+
+ self.client_mock.show_port.return_value = self.port_data
+
+ network_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_network_show.json')
+ with open(network_show_file, 'rb') as fl:
+ self.network_data = json.load(fl)
+
+ self.client_mock.show_network.return_value = self.network_data
+
+ subnet_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_subnet_show.json')
+ with open(subnet_show_file, 'rb') as fl:
+ self.subnet_data = json.load(fl)
+
+ self.client_mock.show_subnet.return_value = self.subnet_data
+
@mock.patch.object(neutron, 'update_neutron_port', autospec=True)
def _test_add_ports_to_network(self, update_mock, is_client_id,
security_groups=None,
@@ -667,6 +693,103 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
self.client_mock.delete_port.assert_called_once_with(
self.neutron_port['id'])
+ def test__uncidr_ipv4(self):
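+ # _uncidr should split CIDR notation into a (network, netmask) pair.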
+ network, netmask = neutron._uncidr('10.0.0.0/24')
+ self.assertEqual('10.0.0.0', network)
+ self.assertEqual('255.255.255.0', netmask)
+
+ def test__uncidr_ipv6(self):
+ network, netmask = neutron._uncidr('::1/64', ipv6=True)
+ self.assertEqual('::', network)
+ self.assertEqual('ffff:ffff:ffff:ffff::', netmask)
+
+ def test_get_neutron_port_data(self):
+ network_data = neutron.get_neutron_port_data('port0', 'vif0')
+
+ expected_port = {
+ 'id': 'port0',
+ 'type': 'vif',
+ 'ethernet_mac_address': 'fa:16:3e:23:fd:d7',
+ 'vif_id': '46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2',
+ 'mtu': 1500
+ }
+
+ self.assertEqual(expected_port, network_data['links'][0])
+
+ expected_network = {
+ 'id': 'a0304c3a-4f08-4c43-88af-d796509c97d2',
+ 'network_id': 'a87cc70a-3e15-4acf-8205-9b711a3531b7',
+ 'type': 'ipv4',
+ 'link': 'port0',
+ 'ip_address': '10.0.0.2',
+ 'netmask': '255.255.255.0',
+ 'routes': [
+ {'gateway': '10.0.0.1',
+ 'netmask': '0.0.0.0',
+ 'network': '0.0.0.0'}
+ ]
+ }
+
+ self.assertEqual(expected_network, network_data['networks'][0])
+
+ def load_ipv6_files(self):
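+ # Replace the IPv4 samples installed in setUp with their IPv6
+ # equivalents.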
+ port_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_port_show_ipv6.json')
+ with open(port_show_file, 'rb') as fl:
+ self.port_data = json.load(fl)
+
+ self.client_mock.show_port.return_value = self.port_data
+
+ network_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_network_show_ipv6.json')
+ with open(network_show_file, 'rb') as fl:
+ self.network_data = json.load(fl)
+
+ self.client_mock.show_network.return_value = self.network_data
+
+ subnet_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_subnet_show_ipv6.json')
+ with open(subnet_show_file, 'rb') as fl:
+ self.subnet_data = json.load(fl)
+
+ self.client_mock.show_subnet.return_value = self.subnet_data
+
+ def test_get_neutron_port_data_ipv6(self):
+ self.load_ipv6_files()
+
+ network_data = neutron.get_neutron_port_data('port1', 'vif1')
+
+ expected_port = {
+ 'id': 'port1',
+ 'type': 'vif',
+ 'ethernet_mac_address': '52:54:00:4f:ef:b7',
+ 'vif_id': '96d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb8',
+ 'mtu': 1500
+ }
+
+ self.assertEqual(expected_port, network_data['links'][0])
+
+ expected_network = {
+ 'id': '906e685a-b964-4d58-9939-9cf3af197c67',
+ 'network_id': 'a87cc70a-3e15-4acf-8205-9b711a3531b7',
+ 'type': 'ipv6',
+ 'link': 'port1',
+ 'ip_address': 'fd00:203:0:113::2',
+ 'netmask': 'ffff:ffff:ffff:ffff::',
+ 'routes': [
+ {'gateway': 'fd00:203:0:113::1',
+ 'netmask': '::0',
+ 'network': '::0'}
+ ]
+ }
+
+ self.assertEqual(expected_network, network_data['networks'][0])
+
def test_get_node_portmap(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
portmap = neutron.get_node_portmap(task)
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index bc1a31645..c647147a2 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -645,7 +645,7 @@ class TestPXEUtils(db_base.DbTestCase):
'config'),
pxe_utils.get_pxe_config_file_path(self.node.uuid))
- def _dhcp_options_for_instance(self, ip_version=4):
+ def _dhcp_options_for_instance(self, ip_version=4, ipxe=False):
self.config(ip_version=ip_version, group='pxe')
if ip_version == 4:
self.config(tftp_server='192.0.2.1', group='pxe')
@@ -653,6 +653,10 @@ class TestPXEUtils(db_base.DbTestCase):
self.config(tftp_server='ff80::1', group='pxe')
self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
self.config(tftp_root='/tftp-path/', group='pxe')
+ if ipxe:
+ bootfile = 'fake-bootfile-ipxe'
+ else:
+ bootfile = 'fake-bootfile'
if ip_version == 6:
# NOTE(TheJulia): DHCPv6 RFCs seem to indicate that the prior
@@ -660,11 +664,11 @@ class TestPXEUtils(db_base.DbTestCase):
# by vendors. The apparent proper option is to return a
# URL in the field https://tools.ietf.org/html/rfc5970#section-3
expected_info = [{'opt_name': '59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % bootfile,
'ip_version': ip_version}]
elif ip_version == 4:
expected_info = [{'opt_name': '67',
- 'opt_value': 'fake-bootfile',
+ 'opt_value': bootfile,
'ip_version': ip_version},
{'opt_name': '210',
'opt_value': '/tftp-path/',
@@ -1320,7 +1324,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
# URL in the field https://tools.ietf.org/html/rfc5970#section-3
expected_boot_script_url = 'http://[ff80::1]:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % boot_file,
'ip_version': ip_version},
{'opt_name': '59',
'opt_value': expected_boot_script_url,
@@ -1352,7 +1356,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
if ip_version == 6:
# Boot URL variable set from prior test of isc parameters.
expected_info = [{'opt_name': 'tag:!ipxe6,59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % boot_file,
'ip_version': ip_version},
{'opt_name': 'tag:ipxe6,59',
'opt_value': expected_boot_script_url,
@@ -1381,23 +1385,23 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
def test_dhcp_options_for_instance_ipxe_bios(self):
self.config(ip_version=4, group='pxe')
- boot_file = 'fake-bootfile-bios'
- self.config(pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-bios-ipxe'
+ self.config(ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
self._dhcp_options_for_instance_ipxe(task, boot_file)
def test_dhcp_options_for_instance_ipxe_uefi(self):
self.config(ip_version=4, group='pxe')
- boot_file = 'fake-bootfile-uefi'
- self.config(uefi_pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-uefi-ipxe'
+ self.config(uefi_ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
self._dhcp_options_for_instance_ipxe(task, boot_file)
def test_dhcp_options_for_ipxe_ipv6(self):
self.config(ip_version=6, group='pxe')
- boot_file = 'fake-bootfile'
- self.config(pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-ipxe'
+ self.config(ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
self._dhcp_options_for_instance_ipxe(task, boot_file, ip_version=6)
diff --git a/ironic/tests/unit/conductor/test_deployments.py b/ironic/tests/unit/conductor/test_deployments.py
index df078032d..9d3c24f1d 100644
--- a/ironic/tests/unit/conductor/test_deployments.py
+++ b/ironic/tests/unit/conductor/test_deployments.py
@@ -20,6 +20,7 @@ from oslo_db import exception as db_exception
from oslo_utils import uuidutils
from ironic.common import exception
+from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import deployments
@@ -373,6 +374,39 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
self.assertFalse(mock_deploy.called)
+ @mock.patch.object(task_manager.TaskManager, 'process_event',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate')
+ @mock.patch.object(conductor_steps, 'validate_deploy_templates',
+ autospec=True)
+ @mock.patch.object(conductor_utils, 'validate_instance_info_traits',
+ autospec=True)
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
+ def test_start_deploy(self, mock_iwdi, mock_validate_traits,
+ mock_validate_templates, mock_deploy_validate,
+ mock_power_validate, mock_process_event):
+ self._start_service()
+ mock_iwdi.return_value = False
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.AVAILABLE,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ deployments.start_deploy(task, self.service, configdrive=None,
+ event='deploy')
+ node.refresh()
+ self.assertTrue(mock_iwdi.called)
+ mock_power_validate.assert_called_once_with(task)
+ mock_deploy_validate.assert_called_once_with(task)
+ mock_validate_traits.assert_called_once_with(task.node)
+ mock_validate_templates.assert_called_once_with(
+ task, skip_missing=True)
+ mock_process_event.assert_called_with(
+ mock.ANY, 'deploy', call_args=(
+ deployments.do_node_deploy, task, 1, None),
+ callback=mock.ANY, err_handler=mock.ANY)
+
@mgr_utils.mock_record_keepalive
class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index f0d25d666..9ae4a0428 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -960,6 +960,51 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(node.instance_uuid)
self.assertIsNone(node.allocation_id)
+ def test_update_node_maintenance_with_broken_interface(self):
+ # Updates of non-driver fields are possible with a broken driver
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ extra={'test': 'one'})
+
+ node.maintenance = True
+ res = self.service.update_node(self.context, node)
+ self.assertTrue(res.maintenance)
+
+ node.refresh()
+ self.assertTrue(node.maintenance)
+ self.assertEqual('foobar', node.power_interface)
+
+ def test_update_node_interface_field_with_broken_interface(self):
+ # Updates of driver fields are NOT possible with a broken driver,
+ # unless they're fixing the breakage.
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ deploy_interface='fake',
+ extra={'test': 'one'})
+
+ node.deploy_interface = 'iscsi'
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.update_node,
+ self.context, node)
+ self.assertEqual(exception.InterfaceNotFoundInEntrypoint,
+ exc.exc_info[0])
+
+ node.refresh()
+ self.assertEqual('foobar', node.power_interface)
+ self.assertEqual('fake', node.deploy_interface)
+
+ def test_update_node_fix_broken_interface(self):
+ # Updates of non-driver fields are possible with a broken driver
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ extra={'test': 'one'})
+
+ node.power_interface = 'fake'
+ self.service.update_node(self.context, node)
+
+ node.refresh()
+ self.assertEqual('fake', node.power_interface)
+
@mgr_utils.mock_record_keepalive
class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -2088,12 +2133,12 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(node.driver_internal_info['deploy_steps'])
self.assertNotIn('root_uuid_or_disk_id', node.driver_internal_info)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
- mock_tear_down.assert_called_once_with(mock.ANY, task)
+ mock_tear_down.assert_called_once_with(task.driver.deploy, task)
mock_clean.assert_called_once_with(task)
self.assertEqual({}, port.internal_info)
mock_unbind.assert_called_once_with('foo', context=mock.ANY)
if enabled_console:
- mock_console.assert_called_once_with(mock.ANY, task)
+ mock_console.assert_called_once_with(task.driver.console, task)
else:
self.assertFalse(mock_console.called)
if with_allocation:
@@ -3609,6 +3654,15 @@ class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service.destroy_node(self.context, node.uuid)
self.assertFalse(mock_power.called)
+ def test_destroy_node_broken_driver(self):
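+ # Deleting a node must still work when its power interface no
+ # longer loads.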
+ node = obj_utils.create_test_node(self.context,
+ power_interface='broken')
+ self._start_service()
+ self.service.destroy_node(self.context, node.uuid)
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.get_node_by_uuid,
+ node.uuid)
+
@mgr_utils.mock_record_keepalive
class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@@ -4824,6 +4878,21 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
self.service.power_state_sync_count[self.node.uuid])
@mock.patch.object(nova, 'power_update', autospec=True)
+ def test_no_power_sync_support(self, mock_power_update, node_power_action):
+ self.config(force_power_state_during_sync=True, group='conductor')
+ self.power.supports_power_sync.return_value = False
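+ # Without power sync support, the conductor should record the
+ # observed state instead of enforcing the expected one.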
+
+ self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
+
+ self.assertFalse(self.power.validate.called)
+ self.power.get_power_state.assert_called_once_with(self.task)
+ self.assertFalse(node_power_action.called)
+ self.assertEqual(states.POWER_OFF, self.node.power_state)
+ self.task.upgrade_lock.assert_called_once_with()
+ mock_power_update.assert_called_once_with(
+ self.task.context, self.node.instance_uuid, states.POWER_OFF)
+
+ @mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded(self, mock_power_update, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=1, group='conductor')
@@ -6012,7 +6081,7 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY, task)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
task.node.refresh()
self.assertNotIn('agent_url', task.node.driver_internal_info)
@@ -6031,7 +6100,7 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIn('driver returned unexpected state', node.last_error)
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- mock_inspect.assert_called_once_with(mock.ANY, task)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
autospec=True)
@@ -6046,7 +6115,7 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.INSPECTWAIT, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY, task)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
@mock.patch.object(manager, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
@@ -6063,7 +6132,7 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY, task)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
self.assertTrue(log_mock.error.called)
def test__check_inspect_wait_timeouts(self):
@@ -6465,10 +6534,8 @@ class DestroyPortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.context,
node_id=node.id,
internal_info={'tenant_vif_port_id': 'fake-id'})
- exc = self.assertRaises(messaging.rpc.ExpectedException,
- self.service.destroy_port,
- self.context, port)
- self.assertEqual(exception.InvalidState, exc.exc_info[0])
+ self.service.destroy_port(self.context, port)
+ self.assertRaises(exception.PortNotFound, port.refresh)
def test_destroy_port_node_active_and_maintenance_no_vif(self):
instance_uuid = uuidutils.generate_uuid()
@@ -6828,8 +6895,8 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
@mock.patch.object(notification_utils, 'emit_console_notification',
@@ -6853,9 +6920,9 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNone(node.last_error)
self.assertTrue(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
- mock_start_console.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
@@ -6884,9 +6951,9 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
- mock_start_console.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
@@ -6922,9 +6989,9 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(
node.driver_internal_info.get('allocated_ipmi_terminal_port',
None))
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
- mock_start_console.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
@@ -6967,8 +7034,8 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
@@ -7003,8 +7070,8 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.ADOPTFAIL, node.provision_state)
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY, task)
- mock_take_over.assert_called_once_with(mock.ANY, task)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
@@ -7185,9 +7252,10 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(
- exception.InvalidParameterValue, self.service.heartbeat,
+ exc = self.assertRaises(
+ messaging.rpc.ExpectedException, self.service.heartbeat,
self.context, node.uuid, 'http://callback', agent_token=None)
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7260,10 +7328,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token='evil', agent_version='5.0.0b23')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token='evil', agent_version='5.0.0b23')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7287,10 +7356,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
# Intentionally sending an older client in case something fishy
# occurs.
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token='evil', agent_version='4.0.0')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token='evil', agent_version='4.0.0')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7312,10 +7382,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token=None, agent_version='6.1.5')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token=None, agent_version='6.1.5')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
diff --git a/ironic/tests/unit/conductor/test_task_manager.py b/ironic/tests/unit/conductor/test_task_manager.py
index d0c547f40..8a57d7dea 100644
--- a/ironic/tests/unit/conductor/test_task_manager.py
+++ b/ironic/tests/unit/conductor/test_task_manager.py
@@ -35,14 +35,14 @@ from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
-@mock.patch.object(objects.Node, 'get')
-@mock.patch.object(objects.Node, 'release')
-@mock.patch.object(objects.Node, 'reserve')
-@mock.patch.object(driver_factory, 'build_driver_for_task')
-@mock.patch.object(objects.Port, 'list_by_node_id')
-@mock.patch.object(objects.Portgroup, 'list_by_node_id')
-@mock.patch.object(objects.VolumeConnector, 'list_by_node_id')
-@mock.patch.object(objects.VolumeTarget, 'list_by_node_id')
+@mock.patch.object(objects.Node, 'get', autospec=True)
+@mock.patch.object(objects.Node, 'release', autospec=True)
+@mock.patch.object(objects.Node, 'reserve', autospec=True)
+@mock.patch.object(driver_factory, 'build_driver_for_task', autospec=True)
+@mock.patch.object(objects.Port, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.Portgroup, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.VolumeConnector, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.VolumeTarget, 'list_by_node_id', autospec=True)
class TaskManagerTestCase(db_base.DbTestCase):
def setUp(self):
super(TaskManagerTestCase, self).setUp()
@@ -673,7 +673,7 @@ class TaskManagerTestCase(db_base.DbTestCase):
on_error_handler.assert_called_once_with(expected_exception,
'fake-argument')
- @mock.patch.object(states.machine, 'copy')
+ @mock.patch.object(states.machine, 'copy', autospec=True)
def test_init_prepares_fsm(
self, copy_mock, get_volconn_mock, get_voltgt_mock,
get_portgroups_mock, get_ports_mock,
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index bb8027046..0dea519e2 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -225,10 +225,35 @@ class NodePowerActionTestCase(db_base.DbTestCase):
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_power_off(self, get_power_mock):
"""Test node_power_action to turn node power off."""
+ dii = {'agent_secret_token': 'token'}
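+ # A regular agent token must be wiped when the node powers off.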
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
- power_state=states.POWER_ON)
+ power_state=states.POWER_ON,
+ driver_internal_info=dii)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ get_power_mock.return_value = states.POWER_ON
+
+ conductor_utils.node_power_action(task, states.POWER_OFF)
+
+ node.refresh()
+ get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(states.POWER_OFF, node['power_state'])
+ self.assertIsNone(node['target_power_state'])
+ self.assertIsNone(node['last_error'])
+ self.assertNotIn('agent_secret_token', node['driver_internal_info'])
+
+ @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
+ def test_node_power_action_power_off_pregenerated_token(self,
+ get_power_mock):
+ dii = {'agent_secret_token': 'token',
+ 'agent_secret_token_pregenerated': True}
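+ # A pregenerated token (e.g. for fast track) should survive the
+ # power off.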
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ power_state=states.POWER_ON,
+ driver_internal_info=dii)
task = task_manager.TaskManager(self.context, node.uuid)
get_power_mock.return_value = states.POWER_ON
@@ -240,6 +265,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
+ self.assertEqual('token',
+ node['driver_internal_info']['agent_secret_token'])
@mock.patch.object(fake.FakePower, 'reboot', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
@@ -2100,3 +2127,9 @@ class GetAttachedVifTestCase(db_base.DbTestCase):
vif, use = conductor_utils.get_attached_vif(self.port)
self.assertEqual('1', vif)
self.assertEqual('rescuing', use)
+
+ def test_get_attached_vif_inspecting(self):
+ self.port.internal_info = {'inspection_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('inspecting', use)
diff --git a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
index 94ebe4a40..cef1fabd4 100644
--- a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
+++ b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
@@ -890,13 +890,11 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
run_playbook_mock.assert_called_once_with(
task.node, 'test_pl', ironic_nodes, 'test_k')
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
return_value=states.POWER_OFF, autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_force_reboot(
- self, power_action_mock, get_pow_state_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_force_reboot(
+ self, power_action_mock, get_pow_state_mock):
d_info = self.node.driver_info
d_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = d_info
@@ -906,27 +904,15 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.node.provision_state = states.DEPLOYING
self.node.save()
- power_on_node_if_needed_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(task.driver, 'network',
- autospec=True) as net_mock:
- self.driver.reboot_and_finish_deploy(task)
- net_mock.remove_provisioning_network.assert_called_once_with(
- task)
- net_mock.configure_tenant_networks.assert_called_once_with(
- task)
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(expected_power_calls,
- power_action_mock.call_args_list)
+ self.driver.tear_down_agent(task)
+ power_action_mock.assert_called_once_with(task, states.POWER_OFF)
get_pow_state_mock.assert_not_called()
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_retry(
- self, power_action_mock, run_playbook_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_soft_poweroff_retry(
+ self, power_action_mock, run_playbook_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.config(group='ansible',
@@ -937,84 +923,38 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.node.driver_internal_info = di_info
self.node.save()
- power_on_node_if_needed_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(task.driver, 'network',
- autospec=True) as net_mock:
- with mock.patch.object(task.driver.power,
- 'get_power_state',
- return_value=states.POWER_ON,
- autospec=True) as p_mock:
- self.driver.reboot_and_finish_deploy(task)
- p_mock.assert_called_with(task)
- self.assertEqual(2, len(p_mock.mock_calls))
- net_mock.remove_provisioning_network.assert_called_once_with(
- task)
- net_mock.configure_tenant_networks.assert_called_once_with(
- task)
- power_action_mock.assert_has_calls(
- [mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(expected_power_calls,
- power_action_mock.call_args_list)
+ with mock.patch.object(task.driver.power,
+ 'get_power_state',
+ return_value=states.POWER_ON,
+ autospec=True) as p_mock:
+ self.driver.tear_down_agent(task)
+ p_mock.assert_called_with(task)
+ self.assertEqual(2, len(p_mock.mock_calls))
+ power_action_mock.assert_called_once_with(task, states.POWER_OFF)
run_playbook_mock.assert_called_once_with(
task.node, 'shutdown.yaml', mock.ANY, mock.ANY)
+ @mock.patch.object(utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ansible_deploy, '_get_node_ip', autospec=True,
return_value='1.2.3.4')
- def test_continue_deploy(self, getip_mock):
- self.node.provision_state = states.DEPLOYWAIT
+ def test_write_image(self, getip_mock, bootdev_mock):
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.multiple(self.driver, autospec=True,
_ansible_deploy=mock.DEFAULT,
reboot_to_instance=mock.DEFAULT):
- self.driver.continue_deploy(task)
+ result = self.driver.write_image(task)
+ self.assertIsNone(result)
getip_mock.assert_called_once_with(task)
self.driver._ansible_deploy.assert_called_once_with(
task, '1.2.3.4')
- self.driver.reboot_to_instance.assert_called_once_with(task)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
-
- @mock.patch.object(utils, 'notify_conductor_resume_deploy', autospec=True)
- @mock.patch.object(utils, 'node_set_boot_device', autospec=True)
- def test_reboot_to_instance(self, bootdev_mock, resume_mock):
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 100, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(self.driver, 'reboot_and_finish_deploy',
- autospec=True):
- task.driver.boot = mock.Mock()
- self.driver.reboot_to_instance(task)
bootdev_mock.assert_called_once_with(task, 'disk',
persistent=True)
- resume_mock.assert_called_once_with(task)
- self.driver.reboot_and_finish_deploy.assert_called_once_with(
- task)
- task.driver.boot.clean_up_ramdisk.assert_called_once_with(
- task)
-
- @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_tear_down_with_smartnic_port(
- self, power_mock, power_on_node_if_needed_mock,
- restore_power_state_mock):
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- driver_return = self.driver.tear_down(task)
- power_mock.assert_called_once_with(task, states.POWER_OFF)
- self.assertEqual(driver_return, states.DELETED)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
autospec=True)
@@ -1105,36 +1045,3 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
-
- @mock.patch.object(flat_network.FlatNetwork, 'remove_provisioning_network',
- autospec=True)
- @mock.patch.object(flat_network.FlatNetwork, 'configure_tenant_networks',
- autospec=True)
- @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- return_value=states.POWER_OFF, autospec=True)
- @mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_with_smartnic_port(
- self, power_action_mock, get_pow_state_mock,
- power_on_node_if_needed_mock, restore_power_state_mock,
- configure_tenant_networks_mock, remove_provisioning_network_mock):
- d_info = self.node.driver_info
- d_info['deploy_forces_oob_reboot'] = True
- self.node.driver_info = d_info
- self.node.save()
- self.config(group='ansible',
- post_deploy_get_power_state_retry_interval=0)
- self.node.provision_state = states.DEPLOYING
- self.node.save()
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- with task_manager.acquire(self.context, self.node.uuid) as task:
- self.driver.reboot_and_finish_deploy(task)
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(
- expected_power_calls, power_action_mock.call_args_list)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
- get_pow_state_mock.assert_not_called()
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py
index f06e21af3..d5f486aba 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_common.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py
@@ -436,6 +436,29 @@ class IloCommonMethodsTestCase(BaseIloTest):
get_ilo_object_mock.assert_called_once_with(self.node)
get_pending_boot_mode_mock.assert_called_once_with()
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_current_boot_mode(self, get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ get_current_boot_mode_mock = ilo_object_mock.get_current_boot_mode
+ get_current_boot_mode_mock.return_value = 'LEGACY'
+ ret = ilo_common.get_current_boot_mode(self.node)
+ self.assertEqual('bios', ret)
+ get_ilo_object_mock.assert_called_once_with(self.node)
+ get_current_boot_mode_mock.assert_called_once_with()
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_current_boot_mode_fail(self, get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ get_current_boot_mode_mock = ilo_object_mock.get_current_boot_mode
+ exc = ilo_error.IloError('error')
+ get_current_boot_mode_mock.side_effect = exc
+ self.assertRaises(exception.IloOperationError,
+ ilo_common.get_current_boot_mode, self.node)
+ get_ilo_object_mock.assert_called_once_with(self.node)
+ get_current_boot_mode_mock.assert_called_once_with()
+
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
autospec=True)
def test_update_boot_mode_instance_info_exists(self,
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_management.py b/ironic/tests/unit/drivers/modules/ilo/test_management.py
index 396df5ebe..e7cb060c9 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_management.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_management.py
@@ -16,10 +16,12 @@
from unittest import mock
+import ddt
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import boot_devices
+from ironic.common import boot_modes
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
@@ -41,6 +43,7 @@ ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
+@ddt.ddt
class IloManagementTestCase(test_common.BaseIloTest):
def setUp(self):
@@ -1181,6 +1184,88 @@ class IloManagementTestCase(test_common.BaseIloTest):
task.driver.management.inject_nmi,
task)
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ @ddt.data((ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY,
+ ['bios']),
+ (ilo_common.SUPPORTED_BOOT_MODE_UEFI_ONLY,
+ ['uefi']),
+ (ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI,
+ ['uefi', 'bios']))
+ @ddt.unpack
+ def test_get_supported_boot_modes(self, boot_modes_val,
+ exp_boot_modes,
+ get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ ilo_object_mock.get_supported_boot_mode.return_value = boot_modes_val
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ supported_boot_modes = (
+ task.driver.management.get_supported_boot_modes(task))
+ self.assertEqual(exp_boot_modes, supported_boot_modes)
+
+ @mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ilo_management.IloManagement,
+ 'get_supported_boot_modes',
+ spec_set=True, autospec=True)
+ def test_set_boot_mode(self, supp_boot_modes_mock,
+ set_boot_mode_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ exp_boot_modes = [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ supp_boot_modes_mock.return_value = exp_boot_modes
+
+ for mode in exp_boot_modes:
+ task.driver.management.set_boot_mode(task, mode=mode)
+ supp_boot_modes_mock.assert_called_once_with(mock.ANY, task)
+ set_boot_mode_mock.assert_called_once_with(task.node, mode)
+ set_boot_mode_mock.reset_mock()
+ supp_boot_modes_mock.reset_mock()
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ilo_management.IloManagement,
+ 'get_supported_boot_modes',
+ spec_set=True, autospec=True)
+ def test_set_boot_mode_fail(self, supp_boot_modes_mock,
+ get_ilo_object_mock):
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ ilo_mock_obj.get_pending_boot_mode.return_value = 'legacy'
+ exc = ilo_error.IloError('error')
+ ilo_mock_obj.set_pending_boot_mode.side_effect = exc
+ exp_boot_modes = [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ supp_boot_modes_mock.return_value = exp_boot_modes
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaisesRegex(
+ exception.IloOperationError, 'uefi as boot mode failed',
+ task.driver.management.set_boot_mode, task, boot_modes.UEFI)
+ supp_boot_modes_mock.assert_called_once_with(mock.ANY, task)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_boot_mode(self, get_ilo_object_mock):
+ expected = 'bios'
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ ilo_mock_obj.get_current_boot_mode.return_value = 'LEGACY'
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ response = task.driver.management.get_boot_mode(task)
+ self.assertEqual(expected, response)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_boot_mode_fail(self, get_ilo_object_mock):
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ exc = ilo_error.IloError('error')
+ ilo_mock_obj.get_current_boot_mode.side_effect = exc
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaisesRegex(
+ exception.IloOperationError, 'Get current boot mode',
+ task.driver.management.get_boot_mode, task)
+
class Ilo5ManagementTestCase(db_base.DbTestCase):
diff --git a/ironic/tests/unit/drivers/modules/network/test_common.py b/ironic/tests/unit/drivers/modules/network/test_common.py
index 722db4685..eefbd8a9d 100644
--- a/ironic/tests/unit/drivers/modules/network/test_common.py
+++ b/ironic/tests/unit/drivers/modules/network/test_common.py
@@ -218,7 +218,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(
task, self.vif_id, {}, {'port_uuid': self.port.uuid}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid],
[p.uuid for p in free_port_like_objs])
@@ -231,7 +231,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(
task, self.vif_id, {}, {'portgroup_uuid': pg1.uuid}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[pg1.uuid],
[p.uuid for p in free_port_like_objs])
@@ -244,7 +244,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(
task, self.vif_id, {}, {'portgroup_uuid': pg2.uuid}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[],
[p.uuid for p in free_port_like_objs])
@@ -258,7 +258,7 @@ class TestCommonFunctions(db_base.DbTestCase):
common._get_free_portgroups_and_ports(
task, self.vif_id, {},
{'port_uuid': uuidutils.generate_uuid()}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[],
[p.uuid for p in free_port_like_objs])
diff --git a/ironic/tests/unit/drivers/modules/network/test_flat.py b/ironic/tests/unit/drivers/modules/network/test_flat.py
index d8ea1a9a0..66c75441c 100644
--- a/ironic/tests/unit/drivers/modules/network/test_flat.py
+++ b/ironic/tests/unit/drivers/modules/network/test_flat.py
@@ -339,7 +339,10 @@ class TestFlatInterface(db_base.DbTestCase):
self.assertRaises(exception.UnsupportedDriverExtension,
self.interface.validate_inspection, task)
- def test_get_node_network_data(self):
+ @mock.patch.object(neutron, 'get_neutron_port_data', autospec=True)
+ def test_get_node_network_data(self, mock_gnpd):
+ mock_gnpd.return_value = {}
+
with task_manager.acquire(self.context, self.node.id) as task:
network_data = self.interface.get_node_network_data(task)
diff --git a/ironic/tests/unit/drivers/modules/network/test_neutron.py b/ironic/tests/unit/drivers/modules/network/test_neutron.py
index b3a64cb0b..4d8c5e7be 100644
--- a/ironic/tests/unit/drivers/modules/network/test_neutron.py
+++ b/ironic/tests/unit/drivers/modules/network/test_neutron.py
@@ -699,13 +699,15 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.node.save()
self._test_configure_tenant_networks(is_client_id=True)
+ @mock.patch.object(neutron_common, 'get_neutron_port_data', autospec=True)
@mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
@mock.patch.object(neutron_common, 'update_neutron_port', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@mock.patch.object(neutron_common, 'get_local_group_information',
autospec=True)
def test_configure_tenant_networks_with_portgroups(
- self, glgi_mock, client_mock, update_mock, wait_agent_mock):
+ self, glgi_mock, client_mock, update_mock, wait_agent_mock,
+ port_data_mock):
pg = utils.create_test_portgroup(
self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32',
extra={'vif_port_id': uuidutils.generate_uuid()})
@@ -860,7 +862,10 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertRaises(exception.UnsupportedDriverExtension,
self.interface.validate_inspection, task)
- def test_get_node_network_data(self):
+ @mock.patch.object(neutron_common, 'get_neutron_port_data', autospec=True)
+ def test_get_node_network_data(self, mock_gnpd):
+ mock_gnpd.return_value = {}
+
with task_manager.acquire(self.context, self.node.id) as task:
network_data = self.interface.get_node_network_data(task)
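Both test_get_node_network_data variants gain an autospec'd patch of the port-data helper so the tests stop touching a live Neutron client, and test_configure_tenant_networks_with_portgroups shows the stacking rule that decides where port_data_mock lands: mock.patch decorators apply bottom-up, so a patch added at the top of the stack arrives as the last positional argument. A minimal illustration (the Api class is a stand-in):

    from unittest import mock

    class Api:
        def first(self):
            return 'real first'

        def second(self):
            return 'real second'

    # Bottom decorator -> first mock argument; the patch added on top
    # therefore shows up last, exactly like port_data_mock above.
    @mock.patch.object(Api, 'second', autospec=True)
    @mock.patch.object(Api, 'first', autospec=True)
    def check(mock_first, mock_second):
        mock_first.return_value = {}
        assert Api().first() == {}

    check()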
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_boot.py b/ironic/tests/unit/drivers/modules/redfish/test_boot.py
index 51177c5ee..b365eb830 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_boot.py
@@ -340,7 +340,8 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
url = redfish_boot._prepare_iso_image(
task, 'http://kernel/img', 'http://ramdisk/img',
- 'http://bootloader/img', root_uuid=task.node.uuid)
+ 'http://bootloader/img', root_uuid=task.node.uuid,
+ base_iso=None)
object_name = 'boot-%s' % task.node.uuid
@@ -352,7 +353,8 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
boot_mode='uefi', esp_image_href='http://bootloader/img',
configdrive_href=mock.ANY,
kernel_params='nofb nomodeset vga=normal',
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
self.assertEqual(expected_url, url)
@@ -381,7 +383,8 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
boot_mode=None, esp_image_href=None,
configdrive_href=mock.ANY,
kernel_params='nofb nomodeset vga=normal',
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
self.assertEqual(expected_url, url)
@@ -397,14 +400,39 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
redfish_boot._prepare_iso_image(
task, 'http://kernel/img', 'http://ramdisk/img',
- bootloader_href=None, root_uuid=task.node.uuid)
+ bootloader_href=None, root_uuid=task.node.uuid,
+ base_iso=None)
mock_create_boot_iso.assert_called_once_with(
mock.ANY, mock.ANY, 'http://kernel/img', 'http://ramdisk/img',
boot_mode=None, esp_image_href=None,
configdrive_href=mock.ANY,
kernel_params=kernel_params,
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
+
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
+ @mock.patch.object(images, 'create_boot_iso', autospec=True)
+ def test__prepare_iso_image_boot_iso(
+ self, mock_create_boot_iso, mock__publish_image):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+
+ task.node.instance_info = {'boot_iso': 'http://host/boot.iso',
+ 'capabilities': {
+ 'boot_option': 'ramdisk'}}
+
+ redfish_boot._prepare_iso_image(
+ task, None, None, root_uuid=None,
+ base_iso='http://host/boot.iso')
+
+ mock_create_boot_iso.assert_called_once_with(
+ mock.ANY, mock.ANY, None, None,
+ boot_mode=None, esp_image_href=None,
+ configdrive_href=None,
+ kernel_params=None,
+ root_uuid=None,
+ base_iso='http://host/boot.iso')
@mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
def test__prepare_deploy_iso(self, mock__prepare_iso_image):
@@ -474,7 +502,30 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock__prepare_iso_image.assert_called_once_with(
mock.ANY, 'http://kernel/img', 'http://ramdisk/img',
- 'bootloader', root_uuid=task.node.uuid)
+ 'bootloader', root_uuid=task.node.uuid, base_iso=None)
+
+ @mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
+ @mock.patch.object(images, 'create_boot_iso', autospec=True)
+ def test__prepare_boot_iso_user_supplied(self, mock_create_boot_iso,
+ mock__prepare_iso_image):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+
+ task.node.instance_info.update(
+ {'boot_iso': 'http://boot/iso'})
+
+ redfish_boot._prepare_boot_iso(
+ task, root_uuid=task.node.uuid)
+
+ mock__prepare_iso_image.assert_called_once_with(
+ mock.ANY, None, None,
+ 'bootloader', root_uuid=task.node.uuid,
+ base_iso='http://boot/iso')
@mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
@@ -537,6 +588,64 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
+ def test_validate_bios_boot_iso(self, mock_get_boot_mode,
+ mock_validate_image_properties,
+ mock_parse_driver_info):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.instance_info.update(
+ {'boot_iso': 'http://localhost/file.iso'}
+ )
+
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+ # NOTE(TheJulia): Boot mode doesn't matter for this
+ # test scenario.
+ mock_get_boot_mode.return_value = 'bios'
+
+ task.driver.boot.validate(task)
+
+ mock_validate_image_properties.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY)
+
+ @mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
+ @mock.patch.object(deploy_utils, 'validate_image_properties',
+ autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
+ def test_validate_bios_boot_iso_conflicting_image_source(
+ self, mock_get_boot_mode,
+ mock_validate_image_properties,
+ mock_parse_driver_info):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.instance_info.update(
+ {'boot_iso': 'http://localhost/file.iso',
+ 'image_source': 'http://localhost/file.img'}
+ )
+
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+ # NOTE(TheJulia): Boot mode doesn't matter for this
+ # test scenario.
+ mock_get_boot_mode.return_value = 'bios'
+
+ task.driver.boot.validate(task)
+
+ mock_validate_image_properties.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY)
+
+ @mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
+ @mock.patch.object(deploy_utils, 'validate_image_properties',
+ autospec=True)
def test_validate_missing(self, mock_validate_image_properties,
mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
@@ -841,6 +950,85 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
+ 'clean_up_instance', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
+ def test_prepare_instance_ramdisk_boot_iso(
+ self, mock_boot_mode_utils, mock_deploy_utils, mock_manager_utils,
+ mock__parse_driver_info, mock__insert_vmedia, mock__eject_vmedia,
+ mock__prepare_boot_iso, mock_clean_up_instance):
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.provision_state = states.DEPLOYING
+ task.node.driver_internal_info[
+ 'root_uuid_or_disk_id'] = self.node.uuid
+ task.node.instance_info = {'boot_iso': 'http://host/boot.iso'}
+
+ mock_deploy_utils.get_boot_option.return_value = 'ramdisk'
+
+ mock__prepare_boot_iso.return_value = 'image-url'
+
+ task.driver.boot.prepare_instance(task)
+
+ mock__prepare_boot_iso.assert_called_once_with(task)
+
+ mock__eject_vmedia.assert_called_once_with(
+ task, sushy.VIRTUAL_MEDIA_CD)
+
+ mock__insert_vmedia.assert_called_once_with(
+ task, 'image-url', sushy.VIRTUAL_MEDIA_CD)
+
+ mock_manager_utils.node_set_boot_device.assert_called_once_with(
+ task, boot_devices.CDROM, persistent=True)
+
+ mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
+
+ @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
+ 'clean_up_instance', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
+ def test_prepare_instance_ramdisk_boot_iso_boot(
+ self, mock_boot_mode_utils, mock_deploy_utils, mock_manager_utils,
+ mock__parse_driver_info, mock__insert_vmedia, mock__eject_vmedia,
+ mock__prepare_boot_iso, mock_clean_up_instance):
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.provision_state = states.DEPLOYING
+ i_info = task.node.instance_info
+ i_info['boot_iso'] = "super-magic"
+ task.node.instance_info = i_info
+ mock_deploy_utils.get_boot_option.return_value = 'ramdisk'
+
+ mock__prepare_boot_iso.return_value = 'image-url'
+
+ task.driver.boot.prepare_instance(task)
+
+ mock__prepare_boot_iso.assert_called_once_with(task)
+
+ mock__eject_vmedia.assert_called_once_with(
+ task, sushy.VIRTUAL_MEDIA_CD)
+
+ mock__insert_vmedia.assert_called_once_with(
+ task, 'image-url', sushy.VIRTUAL_MEDIA_CD)
+
+ mock_manager_utils.node_set_boot_device.assert_called_once_with(
+ task, boot_devices.CDROM, persistent=True)
+
+ mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
+
@mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
@mock.patch.object(redfish_boot, '_cleanup_iso_image', autospec=True)
@mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
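The thread through all of these redfish boot hunks is the new base_iso keyword: when instance_info['boot_iso'] is set, _prepare_boot_iso forwards it so _prepare_iso_image can publish the user-supplied ISO instead of assembling one from a deploy kernel and ramdisk. A hedged sketch of that control flow, using stand-in helpers (publish and build_iso below are illustrative, not the driver's real internals):

    def publish(href):
        return 'published:%s' % href

    def build_iso(kernel_href, ramdisk_href):
        return publish('boot-%s-%s' % (kernel_href, ramdisk_href))

    def prepare_iso_image(kernel_href, ramdisk_href, base_iso=None):
        if base_iso:
            # User-supplied boot_iso: skip kernel/ramdisk assembly.
            return publish(base_iso)
        return build_iso(kernel_href, ramdisk_href)

    def prepare_boot_iso(instance_info, kernel_href, ramdisk_href):
        # Mirrors test__prepare_boot_iso_user_supplied: boot_iso wins.
        return prepare_iso_image(kernel_href, ramdisk_href,
                                 base_iso=instance_info.get('boot_iso'))

    assert (prepare_boot_iso({'boot_iso': 'http://boot/iso'}, 'k', 'r')
            == 'published:http://boot/iso')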
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_management.py b/ironic/tests/unit/drivers/modules/redfish/test_management.py
index 70c1d5df9..60c9fd095 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_management.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_management.py
@@ -177,6 +177,37 @@ class RedfishManagementTestCase(db_base.DbTestCase):
self.assertNotIn('redfish_boot_device',
task.node.driver_internal_info)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_set_boot_device_fail_no_change(self, mock_get_system):
+ fake_system = mock.Mock()
+ fake_system.set_system_boot_options.side_effect = (
+ sushy.exceptions.SushyError()
+ )
+ mock_get_system.return_value = fake_system
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ expected_values = [
+ (True, sushy.BOOT_SOURCE_ENABLED_CONTINUOUS),
+ (False, sushy.BOOT_SOURCE_ENABLED_ONCE)
+ ]
+
+ for target, expected in expected_values:
+ fake_system.boot.get.return_value = expected
+
+ self.assertRaisesRegex(
+ exception.RedfishError, 'Redfish set boot device',
+ task.driver.management.set_boot_device, task,
+ boot_devices.PXE, persistent=target)
+ fake_system.set_system_boot_options.assert_called_once_with(
+ sushy.BOOT_SOURCE_TARGET_PXE, enabled=None)
+ mock_get_system.assert_called_once_with(task.node)
+ self.assertNotIn('redfish_boot_device',
+ task.node.driver_internal_info)
+
+ # Reset mocks
+ # Reset mocks so each loop iteration's assert_called_once_with
+ # verifies only that iteration's calls.
+ fake_system.set_system_boot_options.reset_mock()
+ mock_get_system.reset_mock()
+
@mock.patch.object(sushy, 'Sushy', autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_set_boot_device_persistence_fallback(self, mock_get_system,
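test_set_boot_device_fail_no_change exercises both persistence values in one test body, which only works because every assert_called_once_with is followed by a reset_mock(); without the reset, the second iteration would see two recorded calls and fail. The pattern in isolation:

    from unittest import mock

    fake = mock.Mock()
    fake.set_boot.side_effect = RuntimeError('bmc says no')

    for persistent in (True, False):
        try:
            fake.set_boot('pxe', persistent=persistent)
        except RuntimeError:
            pass
        fake.set_boot.assert_called_once_with('pxe', persistent=persistent)
        # Clear recorded calls so the next iteration's
        # assert_called_once_with checks that iteration alone.
        fake.set_boot.reset_mock()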
diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py
index 57f7c4990..ecc734a9d 100644
--- a/ironic/tests/unit/drivers/modules/test_agent.py
+++ b/ironic/tests/unit/drivers/modules/test_agent.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import types
from unittest import mock
from oslo_config import cfg
@@ -36,7 +35,6 @@ from ironic.drivers.modules.network import flat as flat_network
from ironic.drivers.modules.network import neutron as neutron_network
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import noop as noop_storage
-from ironic.drivers import utils as driver_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
@@ -453,11 +451,11 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertNotIn(
'deployment_reboot', task.node.driver_internal_info)
- @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- def test_deploy_storage_should_write_image_false(self, mock_write,
- mock_pxe_instance):
+ def test_deploy_storage_should_write_image_false(
+ self, mock_write, mock_power):
mock_write.return_value = False
self.node.provision_state = states.DEPLOYING
self.node.deploy_step = {
@@ -467,7 +465,7 @@ class TestAgentDeploy(db_base.DbTestCase):
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertIsNone(driver_return)
- self.assertTrue(mock_pxe_instance.called)
+ self.assertFalse(mock_power.called)
@mock.patch.object(agent_client.AgentClient, 'prepare_image',
autospec=True)
@@ -479,26 +477,15 @@ class TestAgentDeploy(db_base.DbTestCase):
mock_is_fast_track.return_value = True
self.node.target_provision_state = states.ACTIVE
self.node.provision_state = states.DEPLOYING
- test_temp_url = 'http://image'
- expected_image_info = {
- 'urls': [test_temp_url],
- 'id': 'fake-image',
- 'node_uuid': self.node.uuid,
- 'checksum': 'checksum',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'stream_raw_images': CONF.agent.stream_raw_images,
- }
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
- self.assertEqual(states.DEPLOYWAIT, self.driver.deploy(task))
+ result = self.driver.deploy(task)
+ self.assertIsNone(result)
self.assertFalse(power_mock.called)
self.assertFalse(mock_pxe_instance.called)
- task.node.refresh()
- prepare_image_mock.assert_called_with(mock.ANY, task.node,
- expected_image_info)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(prepare_image_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
@@ -1152,13 +1139,21 @@ class TestAgentDeploy(db_base.DbTestCase):
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
- def _test_continue_deploy(self, additional_driver_info=None,
- additional_expected_image_info=None):
+ def _test_write_image(self, additional_driver_info=None,
+ additional_expected_image_info=None,
+ compat=False):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
driver_info = self.node.driver_info
driver_info.update(additional_driver_info or {})
self.node.driver_info = driver_info
+ if not compat:
+ step = {'step': 'write_image', 'interface': 'deploy'}
+ dii = self.node.driver_internal_info
+ dii['agent_cached_deploy_steps'] = {
+ 'deploy': [step],
+ }
+ self.node.driver_internal_info = dii
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
@@ -1172,24 +1167,34 @@ class TestAgentDeploy(db_base.DbTestCase):
}
expected_image_info.update(additional_expected_image_info or {})
- client_mock = mock.MagicMock(spec_set=['prepare_image'])
+ client_mock = mock.MagicMock(spec_set=['prepare_image',
+ 'execute_deploy_step'])
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy._client = client_mock
- task.driver.deploy.continue_deploy(task)
-
- client_mock.prepare_image.assert_called_with(task.node,
- expected_image_info)
+ task.driver.deploy.write_image(task)
+
+ if compat:
+ client_mock.prepare_image.assert_called_with(
+ task.node, expected_image_info, wait=True)
+ else:
+ step['args'] = {'image_info': expected_image_info,
+ 'configdrive': None}
+ client_mock.execute_deploy_step.assert_called_once_with(
+ step, task.node, mock.ANY)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
- def test_continue_deploy(self):
- self._test_continue_deploy()
+ def test_write_image(self):
+ self._test_write_image()
- def test_continue_deploy_with_proxies(self):
- self._test_continue_deploy(
+ def test_write_image_compat(self):
+ self._test_write_image(compat=True)
+
+ def test_write_image_with_proxies(self):
+ self._test_write_image(
additional_driver_info={'image_https_proxy': 'https://spam.ni',
'image_http_proxy': 'spam.ni',
'image_no_proxy': '.eggs.com'},
@@ -1199,22 +1204,22 @@ class TestAgentDeploy(db_base.DbTestCase):
'no_proxy': '.eggs.com'}
)
- def test_continue_deploy_with_no_proxy_without_proxies(self):
- self._test_continue_deploy(
+ def test_write_image_with_no_proxy_without_proxies(self):
+ self._test_write_image(
additional_driver_info={'image_no_proxy': '.eggs.com'}
)
- def test_continue_deploy_image_source_is_url(self):
+ def test_write_image_image_source_is_url(self):
instance_info = self.node.instance_info
instance_info['image_source'] = 'http://example.com/woof.img'
self.node.instance_info = instance_info
- self._test_continue_deploy(
+ self._test_write_image(
additional_expected_image_info={
'id': 'woof.img'
}
)
- def test_continue_deploy_partition_image(self):
+ def test_write_image_partition_image(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
i_info = self.node.instance_info
@@ -1265,161 +1270,96 @@ class TestAgentDeploy(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy._client = client_mock
- task.driver.deploy.continue_deploy(task)
+ task.driver.deploy.write_image(task)
client_mock.prepare_image.assert_called_with(task.node,
- expected_image_info)
+ expected_image_info,
+ wait=True)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(deploy_utils, 'remove_http_instance_symlink',
autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance(self, check_deploy_mock,
- prepare_instance_mock, power_off_mock,
- get_power_state_mock, node_power_action_mock,
- uuid_mock, log_mock, remove_symlink_mock,
- power_on_node_if_needed_mock, resume_mock):
+ def test_prepare_instance_boot(self, prepare_instance_mock,
+ uuid_mock, log_mock, remove_symlink_mock):
self.config(manage_agent_boot=True, group='agent')
self.config(image_download_source='http', group='agent')
- check_deploy_mock.return_value = None
uuid_mock.return_value = {}
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- get_power_state_mock.return_value = states.POWER_OFF
- power_on_node_if_needed_mock.return_value = None
task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
+ task.driver.deploy.prepare_instance_boot(task)
uuid_mock.assert_called_once_with(mock.ANY, task.node)
self.assertNotIn('root_uuid_or_disk_id',
task.node.driver_internal_info)
self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(mock.ANY, task,
None, None, None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertTrue(remove_symlink_mock.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_no_manage_agent_boot(
- self, check_deploy_mock, prepare_instance_mock, power_off_mock,
- get_power_state_mock, node_power_action_mock, uuid_mock,
- bootdev_mock, log_mock, power_on_node_if_needed_mock,
- resume_mock):
+ def test_prepare_instance_boot_no_manage_agent_boot(
+ self, prepare_instance_mock, uuid_mock,
+ bootdev_mock, log_mock):
self.config(manage_agent_boot=False, group='agent')
- check_deploy_mock.return_value = None
uuid_mock.return_value = {}
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
+ task.driver.deploy.prepare_instance_boot(task)
uuid_mock.assert_called_once_with(mock.ANY, task.node)
self.assertNotIn('root_uuid_or_disk_id',
task.node.driver_internal_info)
self.assertFalse(log_mock.called)
self.assertFalse(prepare_instance_mock.called)
bootdev_mock.assert_called_once_with(task, 'disk', persistent=True)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_partition_image(self, check_deploy_mock,
- prepare_instance_mock,
- power_off_mock,
- get_power_state_mock,
- node_power_action_mock,
- uuid_mock, boot_mode_mock,
- log_mock,
- power_on_node_if_needed_mock,
- resume_mock):
- check_deploy_mock.return_value = None
+ def test_prepare_instance_boot_partition_image(self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock,
+ log_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
uuid_mock.return_value = {
'command_result': {'root uuid': 'root_uuid'}
}
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode_mock.return_value = 'bios'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
+ task.driver.deploy.prepare_instance_boot(task)
uuid_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
@@ -1430,18 +1370,9 @@ class TestAgentDeploy(db_base.DbTestCase):
task,
'root_uuid',
None, None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
@@ -1449,38 +1380,25 @@ class TestAgentDeploy(db_base.DbTestCase):
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_partition_image_compat(
- self, check_deploy_mock, prepare_instance_mock, power_off_mock,
- get_power_state_mock, node_power_action_mock, uuid_mock,
- old_uuid_mock, boot_mode_mock, log_mock,
- power_on_node_if_needed_mock, resume_mock):
- check_deploy_mock.return_value = None
+ def test_prepare_instance_boot_partition_image_compat(
+ self, prepare_instance_mock, uuid_mock,
+ old_uuid_mock, boot_mode_mock, log_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
uuid_mock.side_effect = exception.AgentAPIError
old_uuid_mock.return_value = 'root_uuid'
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode_mock.return_value = 'bios'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
+ task.driver.deploy.prepare_instance_boot(task)
uuid_mock.assert_called_once_with(mock.ANY, task.node)
old_uuid_mock.assert_called_once_with(mock.ANY, task, 'root_uuid')
driver_int_info = task.node.driver_internal_info
@@ -1492,52 +1410,31 @@ class TestAgentDeploy(db_base.DbTestCase):
task,
'root_uuid',
None, None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_partition_localboot_ppc64(
- self, check_deploy_mock, prepare_instance_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, uuid_mock, boot_mode_mock, log_mock,
- power_on_node_if_needed_mock, resume_mock):
- check_deploy_mock.return_value = None
+ def test_prepare_instance_boot_partition_localboot_ppc64(
+ self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock, log_mock):
uuid_mock.return_value = {
'command_result': {
'root uuid': 'root_uuid',
'PReP Boot partition uuid': 'prep_boot_part_uuid',
}
}
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
@@ -1547,9 +1444,8 @@ class TestAgentDeploy(db_base.DbTestCase):
properties.update(cpu_arch='ppc64le')
task.node.properties = properties
boot_mode_mock.return_value = 'bios'
- task.driver.deploy.reboot_to_instance(task)
+ task.driver.deploy.prepare_instance_boot(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
driver_int_info['root_uuid_or_disk_id'])
@@ -1558,99 +1454,39 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(
mock.ANY, task, 'root_uuid', None, 'prep_boot_part_uuid')
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
- autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- @mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
- autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_boot_error(
- self, check_deploy_mock, prepare_instance_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- uuid_mock, collect_ramdisk_logs_mock, log_mock):
- check_deploy_mock.return_value = "Error"
- uuid_mock.return_value = None
- self.node.provision_state = states.DEPLOYWAIT
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- get_power_state_mock.return_value = states.POWER_OFF
- task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- self.assertFalse(prepare_instance_mock.called)
- self.assertFalse(log_mock.called)
- self.assertFalse(power_off_mock.called)
- collect_ramdisk_logs_mock.assert_called_once_with(task.node)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
-
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_localboot(self, check_deploy_mock,
- prepare_instance_mock,
- power_off_mock,
- get_power_state_mock,
- node_power_action_mock,
- uuid_mock, boot_mode_mock,
- log_mock,
- power_on_node_if_needed_mock,
- resume_mock):
- check_deploy_mock.return_value = None
+ def test_prepare_instance_boot_localboot(self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock,
+ log_mock):
uuid_mock.return_value = {
'command_result': {
'root uuid': 'root_uuid',
'efi system partition uuid': 'efi_uuid',
}
}
- self.node.provision_state = states.DEPLOYWAIT
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
boot_option = {'capabilities': '{"boot_option": "local"}'}
task.node.instance_info = boot_option
boot_mode_mock.return_value = 'uefi'
- task.driver.deploy.reboot_to_instance(task)
+ task.driver.deploy.prepare_instance_boot(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
driver_int_info['root_uuid_or_disk_id'])
@@ -1659,106 +1495,24 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(
mock.ANY, task, 'root_uuid', 'efi_uuid', None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = []
- self.assertFalse(task.driver.deploy.deploy_has_started(task))
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_is_done(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'SUCCESS'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_did_start(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_multiple_commands(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'cache_image',
- 'command_status': 'SUCCESS'},
- {'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_other_commands(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'cache_image',
- 'command_status': 'SUCCESS'}]
- self.assertFalse(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'SUCCESS'}]
- self.assertTrue(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_empty_response(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = []
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_race(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'some_other_command',
- 'command_status': 'SUCCESS'}]
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_still_running(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_several_results(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [
- {'command_name': 'prepare_image', 'command_status': 'SUCCESS'},
- {'command_name': 'other_command', 'command_status': 'SUCCESS'},
- {'command_name': 'prepare_image', 'command_status': 'RUNNING'},
- ]
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
+ @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- def test_deploy_is_done_not_the_last(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [
- {'command_name': 'prepare_image', 'command_status': 'SUCCESS'},
- {'command_name': 'other_command', 'command_status': 'SUCCESS'},
- ]
- self.assertTrue(task.driver.deploy.deploy_is_done(task))
+ def test_prepare_instance_boot_storage_should_write_image_with_smartnic(
+ self, mock_write, mock_pxe_instance):
+ mock_write.return_value = False
+ self.node.provision_state = states.DEPLOYING
+ self.node.deploy_step = {
+ 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
+ self.node.save()
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ driver_return = self.driver.prepare_instance_boot(task)
+ self.assertIsNone(driver_return)
+ self.assertTrue(mock_pxe_instance.called)
@mock.patch.object(manager_utils, 'restore_power_state_if_needed',
autospec=True)
@@ -1856,31 +1610,6 @@ class TestAgentDeploy(db_base.DbTestCase):
self.context, self.node['uuid'], shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
- autospec=True)
- def test_deploy_storage_should_write_image_false_with_smartnic_port(
- self, mock_write, mock_pxe_instance,
- power_on_node_if_needed_mock, restore_power_state_mock):
- mock_write.return_value = False
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- driver_return = self.driver.deploy(task)
- self.assertIsNone(driver_return)
- self.assertTrue(mock_pxe_instance.called)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-
class AgentRAIDTestCase(db_base.DbTestCase):
@@ -1920,6 +1649,18 @@ class AgentRAIDTestCase(db_base.DbTestCase):
self.assertEqual(0, ret[0]['priority'])
self.assertEqual(0, ret[1]['priority'])
+ @mock.patch.object(agent_base, 'get_steps', autospec=True)
+ def test_get_deploy_steps(self, get_steps_mock):
+ get_steps_mock.return_value = [
+ {'step': 'apply_configuration', 'interface': 'raid',
+ 'priority': 0},
+ ]
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ ret = task.driver.raid.get_deploy_steps(task)
+
+ self.assertEqual('apply_configuration', ret[0]['step'])
+
@mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration(self, execute_mock,
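The test_agent.py churn follows the agent deploy refactor: continue_deploy becomes a write_image deploy step and reboot_to_instance becomes prepare_instance_boot, with the step machinery (rather than heartbeat polling and an explicit power cycle) presumably moving the node along, which is why the expected states shift from DEPLOYWAIT back to DEPLOYING and the power mocks disappear. A hedged sketch of the compat split that _test_write_image drives, reusing the test's step-dict shape but with otherwise illustrative names:

    from unittest import mock

    def write_image(client, node, image_info, cached_steps=None):
        steps = (cached_steps or {}).get('deploy', [])
        step = next((s for s in steps if s['step'] == 'write_image'), None)
        if step is None:
            # Older agents never advertised a write_image step: fall
            # back to the one-shot prepare_image call (the compat=True
            # branch above) and wait on it.
            return client.prepare_image(node, image_info, wait=True)
        step['args'] = {'image_info': image_info, 'configdrive': None}
        return client.execute_deploy_step(step, node, None)

    client = mock.Mock()
    write_image(client, 'node', {'id': 'img'})  # compat path
    client.prepare_image.assert_called_once_with('node', {'id': 'img'},
                                                 wait=True)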
diff --git a/ironic/tests/unit/drivers/modules/test_agent_base.py b/ironic/tests/unit/drivers/modules/test_agent_base.py
index 160fef012..dac8e2fca 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_base.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_base.py
@@ -171,6 +171,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.deploy.heartbeat(task, 'url', '3.2.0')
+ self.assertIsNone(task.node.last_error)
self.assertFalse(task.shared)
self.assertEqual(
'url', task.node.driver_internal_info['agent_url'])
@@ -304,6 +305,44 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
self.assertFalse(rti_mock.called)
self.assertFalse(in_resume_deploy_mock.called)
+ @mock.patch.object(agent_base.HeartbeatMixin, 'process_next_step',
+ autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'in_core_deploy_step', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'deploy_has_started', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'deploy_is_done', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin, 'continue_deploy',
+ autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'reboot_to_instance', autospec=True)
+ def test_heartbeat_decomposed_steps(self, rti_mock, cd_mock,
+ deploy_is_done_mock,
+ deploy_started_mock,
+ in_deploy_mock,
+ next_step_mock):
+ self.deploy.has_decomposed_deploy_steps = True
+ # Check that heartbeats do not trigger deployment actions when the
+ # driver has decomposed deploy steps.
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.deploy.heartbeat(task, 'url', '3.2.0')
+ self.assertFalse(task.shared)
+ self.assertEqual(
+ 'url', task.node.driver_internal_info['agent_url'])
+ self.assertEqual(
+ '3.2.0',
+ task.node.driver_internal_info['agent_version'])
+ self.assertFalse(in_deploy_mock.called)
+ self.assertFalse(deploy_started_mock.called)
+ self.assertFalse(deploy_is_done_mock.called)
+ self.assertFalse(cd_mock.called)
+ self.assertFalse(rti_mock.called)
+ self.assertTrue(next_step_mock.called)
+
@mock.patch.object(agent_base.HeartbeatMixin, 'continue_deploy',
autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin,
@@ -849,8 +888,6 @@ class AgentRescueTests(AgentDeployMixinBaseTest):
class AgentDeployMixinTest(AgentDeployMixinBaseTest):
@mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -858,34 +895,27 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy(
+ def test_tear_down_agent(
self, power_off_mock, get_power_state_mock,
- node_power_action_mock, collect_mock, resume_mock,
+ node_power_action_mock, collect_mock,
power_on_node_if_needed_mock):
cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.side_effect = [states.POWER_ON,
states.POWER_OFF]
power_on_node_if_needed_mock.return_value = None
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(2, get_power_state_mock.call_count)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(node_power_action_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
collect_mock.assert_called_once_with(task.node)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -893,38 +923,23 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, mock_collect, resume_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_soft_poweroff_doesnt_complete(
+ self, power_off_mock, get_power_state_mock,
+ node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- power_on_node_if_needed_mock.return_value = None
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.return_value = states.POWER_ON
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
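Across the tear_down_agent tests the poll count is the tell: two get_power_state calls when soft power-off lands, seven (the retry budget) when the node stays up or the BMC errors, followed by a forced POWER_OFF, and no POWER_ON afterwards since the subsequent boot is handled elsewhere. A hedged sketch of that loop; the attempt count matches the tests, the remaining names are illustrative, and the real code sleeps between polls:

    from unittest import mock

    POWER_OFF, POWER_ON = 'power off', 'power on'

    def tear_down_agent(client, power, node, attempts=7):
        try:
            client.power_off(node)      # ask the in-ramdisk agent first
        except Exception:
            pass                        # the agent may already be gone
        for _ in range(attempts):
            try:
                if power.get_power_state(node) == POWER_OFF:
                    return              # soft power-off landed
            except Exception:
                pass                    # transient BMC error: keep polling
        power.node_power_action(node, POWER_OFF)  # hard fallback

    power = mock.Mock()
    power.get_power_state.return_value = POWER_ON
    tear_down_agent(mock.Mock(), power, 'node-1')
    assert power.get_power_state.call_count == 7
    power.node_power_action.assert_called_once_with('node-1', POWER_OFF)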
@@ -932,35 +947,23 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- mock_collect, resume_mock):
+ def test_tear_down_agent_soft_poweroff_fails(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
+ mock_collect):
power_off_mock.side_effect = RuntimeError("boom")
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.return_value = states.POWER_ON
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -968,38 +971,26 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_race(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- mock_collect, resume_mock):
+ def test_tear_down_agent_soft_poweroff_race(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
+ mock_collect):
# Test the situation when soft power off works, but ironic doesn't
# learn about it.
power_off_mock.side_effect = RuntimeError("boom")
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.side_effect = [states.POWER_ON,
states.POWER_OFF]
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(node_power_action_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -1007,67 +998,21 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_get_power_state_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- mock_collect, resume_mock, power_on_node_if_needed_mock):
+ def test_tear_down_agent_get_power_state_fails(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
+ mock_collect, power_on_node_if_needed_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.side_effect = RuntimeError("boom")
power_on_node_if_needed_mock.return_value = None
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- self.assertFalse(mock_collect.called)
-
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(time, 'sleep', lambda seconds: None)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_configure_tenant_network_exception(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- mock_collect, power_on_node_if_needed_mock):
- self.node.network_interface = 'neutron'
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- power_on_node_if_needed_mock.return_value = None
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- configure_tenant_net_mock.side_effect = exception.NetworkError(
- "boom")
- self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy, task)
- self.assertEqual(7, get_power_state_mock.call_count)
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@@ -1078,78 +1023,31 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_off_fails(
+ def test_tear_down_agent_power_off_fails(
self, power_off_mock, get_power_state_mock,
node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.return_value = states.POWER_ON
node_power_action_mock.side_effect = RuntimeError("boom")
self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy,
+ self.deploy.tear_down_agent,
task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF)])
+ node_power_action_mock.assert_called_with(task, states.POWER_OFF)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
mock_collect.assert_called_once_with(task.node)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(time, 'sleep', lambda seconds: None)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_power_on_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, mock_collect,
- power_on_node_if_needed_mock):
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_ON
- node_power_action_mock.side_effect = [None,
- RuntimeError("boom")]
- self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy,
- task)
- power_off_mock.assert_called_once_with(task.node)
- self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- self.assertFalse(mock_collect.called)
-
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_action_oob_power_off(
- self, sync_mock, node_power_action_mock, mock_collect,
- resume_mock):
+ def test_tear_down_agent_power_action_oob_power_off(
+ self, sync_mock, node_power_action_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
@@ -1158,30 +1056,23 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- self.deploy.reboot_and_finish_deploy(task)
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.tear_down_agent(task)
sync_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON),
- ])
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(agent_base.LOG, 'warning', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_action_oob_power_off_failed(
- self, sync_mock, node_power_action_mock, log_mock, mock_collect,
- resume_mock):
+ def test_tear_down_agent_power_action_oob_power_off_failed(
+ self, sync_mock, node_power_action_mock, log_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
@@ -1190,17 +1081,16 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ log_mock.reset_mock()
+
sync_mock.return_value = {'faultstring': 'Unknown command: blah'}
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
sync_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON),
- ])
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
log_error = ('The version of the IPA ramdisk used in the '
'deployment do not support the command "sync"')
@@ -1210,6 +1100,95 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
{'node': task.node.uuid, 'error': log_error})
self.assertFalse(mock_collect.called)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
+ @mock.patch.object(time, 'sleep', lambda seconds: None)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(fake.FakePower, 'get_supported_power_states',
+ lambda self, task: [states.REBOOT])
+ @mock.patch.object(agent_client.AgentClient, 'sync', autospec=True)
+ def test_tear_down_agent_no_power_on_support(
+ self, sync_mock, node_power_action_mock, collect_mock,
+ power_on_node_if_needed_mock):
+ cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.tear_down_agent(task)
+ node_power_action_mock.assert_called_once_with(task, states.REBOOT)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+ collect_mock.assert_called_once_with(task.node)
+ self.assertFalse(power_on_node_if_needed_mock.called)
+ sync_mock.assert_called_once_with(self.deploy._client, task.node)
+
+ @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
+ autospec=True)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'remove_provisioning_network', spec_set=True, autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'configure_tenant_networks', spec_set=True, autospec=True)
+ def test_switch_to_tenant_network(self, configure_tenant_net_mock,
+ remove_provisioning_net_mock,
+ power_on_node_if_needed_mock,
+ restore_power_state_mock):
+ power_on_node_if_needed_mock.return_value = states.POWER_OFF
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.switch_to_tenant_network(task)
+ remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
+ task)
+ configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
+ power_on_node_if_needed_mock.assert_called_once_with(task)
+ restore_power_state_mock.assert_called_once_with(
+ task, states.POWER_OFF)
+
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'remove_provisioning_network', spec_set=True, autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'configure_tenant_networks', spec_set=True, autospec=True)
+ def test_switch_to_tenant_network_fails(self, configure_tenant_net_mock,
+ remove_provisioning_net_mock,
+ mock_collect):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ configure_tenant_net_mock.side_effect = exception.NetworkError(
+ "boom")
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.deploy.switch_to_tenant_network, task)
+ remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
+ task)
+ configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
+ self.assertFalse(mock_collect.called)
+
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test_boot_instance(self, node_power_action_mock):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.boot_instance(task)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_ON)
+
+ @mock.patch.object(fake.FakePower, 'get_supported_power_states',
+ lambda self, task: [states.REBOOT])
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test_boot_instance_no_power_on(self, node_power_action_mock):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.boot_instance(task)
+ self.assertFalse(node_power_action_mock.called)
+
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@@ -2107,46 +2086,6 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
hook_returned = agent_base._get_post_step_hook(self.node, 'clean')
self.assertIsNone(hook_returned)
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(time, 'sleep', lambda seconds: None)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- def test_reboot_and_finish_deploy_with_smartnic_port(
- self, power_off_mock, get_power_state_mock,
- node_power_action_mock, collect_mock, resume_mock,
- power_on_node_if_needed_mock, restore_power_state_mock):
- cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- get_power_state_mock.side_effect = [states.POWER_ON,
- states.POWER_OFF]
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- self.deploy.reboot_and_finish_deploy(task)
- power_off_mock.assert_called_once_with(task.node)
- self.assertEqual(2, get_power_state_mock.call_count)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- collect_mock.assert_called_once_with(task.node)
- resume_mock.assert_called_once_with(task)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-
class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
@@ -2244,6 +2183,7 @@ class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
+ log_mock.reset_mock()
self.deploy.refresh_steps(task, 'deploy')
client_mock.assert_called_once_with(mock.ANY, task.node,
@@ -2252,7 +2192,7 @@ class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
task.node.driver_internal_info)
self.assertIsNone(task.node.driver_internal_info.get(
'agent_cached_deploy_steps'))
- self.assertFalse(log_mock.called)
+ log_mock.assert_not_called()
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
@@ -2383,6 +2323,23 @@ class StepMethodsTestCase(db_base.DbTestCase):
self.context, self.node.uuid, shared=False) as task:
self.assertEqual([], agent_base.get_steps(task, 'clean'))
+ def test_find_step(self):
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+ step = agent_base.find_step(task, 'clean', 'deploy',
+ 'erase_devices')
+ self.assertEqual(self.clean_steps['deploy'][0], step)
+
+ def test_find_step_not_found(self):
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+ self.assertIsNone(agent_base.find_step(
+ task, 'clean', 'non-deploy', 'erase_devices'))
+ self.assertIsNone(agent_base.find_step(
+ task, 'clean', 'deploy', 'something_else'))
+ self.assertIsNone(agent_base.find_step(
+ task, 'deploy', 'deploy', 'erase_devices'))
+
def test_get_deploy_steps(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
@@ -2390,18 +2347,35 @@ class StepMethodsTestCase(db_base.DbTestCase):
'agent_cached_deploy_steps': self.clean_steps
}
steps = self.deploy.get_deploy_steps(task)
- # 2 in-band steps + one out-of-band
- self.assertEqual(3, len(steps))
- self.assertIn(self.clean_steps['deploy'][0], steps)
- self.assertIn(self.clean_steps['deploy'][1], steps)
- self.assertNotIn(self.clean_steps['raid'][0], steps)
+            # 2 in-band steps + 4 out-of-band
+ expected = [
+ {'step': 'deploy', 'priority': 100, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'tear_down_agent', 'priority': 40, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'switch_to_tenant_network', 'priority': 30,
+ 'argsinfo': None, 'interface': 'deploy'},
+ {'step': 'boot_instance', 'priority': 20, 'argsinfo': None,
+ 'interface': 'deploy'},
+ ] + self.clean_steps['deploy']
+ self.assertCountEqual(expected, steps)
def test_get_deploy_steps_only_oob(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
steps = self.deploy.get_deploy_steps(task)
- # one out-of-band step
- self.assertEqual(1, len(steps))
+            # four base out-of-band steps
+ expected = [
+ {'step': 'deploy', 'priority': 100, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'tear_down_agent', 'priority': 40, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'switch_to_tenant_network', 'priority': 30,
+ 'argsinfo': None, 'interface': 'deploy'},
+ {'step': 'boot_instance', 'priority': 20, 'argsinfo': None,
+ 'interface': 'deploy'},
+ ]
+ self.assertCountEqual(expected, steps)
@mock.patch('ironic.objects.Port.list_by_node_id',
spec_set=types.FunctionType)
diff --git a/ironic/tests/unit/drivers/modules/test_agent_client.py b/ironic/tests/unit/drivers/modules/test_agent_client.py
index 22c857555..69aa03495 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_client.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_client.py
@@ -92,7 +92,7 @@ class TestAgentClient(base.TestCase):
def test__get_command_url_fail(self):
del self.node.driver_internal_info['agent_url']
- self.assertRaises(exception.IronicException,
+ self.assertRaises(exception.AgentConnectionFailed,
self.client._get_command_url,
self.node)
@@ -162,7 +162,7 @@ class TestAgentClient(base.TestCase):
method = 'foo.bar'
params = {}
- self.client._get_command_url(self.node)
+ url = self.client._get_command_url(self.node)
self.client._get_command_body(method, params)
e = self.assertRaises(exception.AgentConnectionFailed,
@@ -173,6 +173,12 @@ class TestAgentClient(base.TestCase):
'command %(method)s. Error: %(error)s' %
{'method': method, 'node': self.node.uuid,
'error': error}, str(e))
+ self.client.session.post.assert_called_with(
+ url,
+ data=mock.ANY,
+ params={'wait': 'false'},
+ timeout=60)
+ self.assertEqual(3, self.client.session.post.call_count)
def test__command_error_code(self):
response_text = {"faultstring": "you dun goofd"}
@@ -259,15 +265,21 @@ class TestAgentClient(base.TestCase):
timeout=CONF.agent.command_timeout)
def test_get_commands_status_retries(self):
- with mock.patch.object(self.client.session, 'get',
- autospec=True) as mock_get:
- res = mock.MagicMock(spec_set=['json'])
- res.json.return_value = {'commands': []}
- mock_get.side_effect = [
- requests.ConnectionError('boom'),
- res]
- self.assertEqual([], self.client.get_commands_status(self.node))
- self.assertEqual(2, mock_get.call_count)
+ res = mock.MagicMock(spec_set=['json'])
+ res.json.return_value = {'commands': []}
+ self.client.session.get.side_effect = [
+ requests.ConnectionError('boom'),
+ res
+ ]
+ self.assertEqual([], self.client.get_commands_status(self.node))
+ self.assertEqual(2, self.client.session.get.call_count)
+
+ def test_get_commands_status_no_retries(self):
+ self.client.session.get.side_effect = requests.ConnectionError('boom')
+ self.assertRaises(exception.AgentConnectionFailed,
+ self.client.get_commands_status, self.node,
+ retry_connection=False)
+ self.assertEqual(1, self.client.session.get.call_count)
def test_prepare_image(self):
self.client._command = mock.MagicMock(spec_set=[])
diff --git a/ironic/tests/unit/drivers/modules/test_agent_power.py b/ironic/tests/unit/drivers/modules/test_agent_power.py
new file mode 100644
index 000000000..0d4004c66
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/test_agent_power.py
@@ -0,0 +1,127 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+from unittest import mock
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.drivers.modules import agent_client
+from ironic.drivers.modules import agent_power
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as object_utils
+
+
+@mock.patch('time.sleep', lambda _sec: None)
+class AgentPowerTest(db_base.DbTestCase):
+
+ def setUp(self):
+ super(AgentPowerTest, self).setUp()
+ self.config(fast_track=True, group='deploy')
+ self.power = agent_power.AgentPower()
+ dii = {
+ 'agent_last_heartbeat': datetime.datetime.now().strftime(
+ "%Y-%m-%dT%H:%M:%S.%f"),
+ 'deployment_reboot': True,
+ 'agent_url': 'http://url',
+ 'agent_secret_token': 'very secret',
+ }
+ self.node = object_utils.create_test_node(
+ self.context, driver_internal_info=dii,
+ provision_state=states.DEPLOYING)
+ self.task = mock.Mock(spec=task_manager.TaskManager, node=self.node)
+
+ def test_basics(self):
+ self.assertEqual({}, self.power.get_properties())
+ self.assertFalse(self.power.supports_power_sync(self.task))
+ self.assertEqual([states.REBOOT, states.SOFT_REBOOT],
+ self.power.get_supported_power_states(self.task))
+
+ def test_validate(self):
+ self.power.validate(self.task)
+
+ def test_validate_fails(self):
+ self.node.driver_internal_info['agent_last_heartbeat'] = \
+ datetime.datetime(2010, 7, 19).strftime(
+ "%Y-%m-%dT%H:%M:%S.%f")
+ self.assertRaises(exception.InvalidParameterValue,
+ self.power.validate, self.task)
+
+ del self.node.driver_internal_info['agent_last_heartbeat']
+ self.assertRaises(exception.InvalidParameterValue,
+ self.power.validate, self.task)
+
+ def test_get_power_state(self):
+ self.assertEqual(states.POWER_ON,
+ self.power.get_power_state(self.task))
+
+ def test_get_power_state_unknown(self):
+ self.node.driver_internal_info['agent_last_heartbeat'] = \
+ datetime.datetime(2010, 7, 19).strftime(
+ "%Y-%m-%dT%H:%M:%S.%f")
+ self.assertIsNone(self.power.get_power_state(self.task))
+
+ del self.node.driver_internal_info['agent_last_heartbeat']
+ self.assertIsNone(self.power.get_power_state(self.task))
+
+ @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot(self, mock_reboot, mock_commands):
+ mock_commands.side_effect = [
+ [{'command_name': 'run_image', 'command_status': 'RUNNING'}],
+ exception.AgentConnectionFailed,
+ exception.AgentConnectionFailed,
+ [{'command_name': 'get_deploy_steps', 'command_status': 'RUNNING'}]
+ ]
+ with task_manager.acquire(self.context, self.node.id) as task:
+ # Save the node since the upgrade_lock call changes it
+ node = task.node
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, node)
+ mock_commands.assert_called_with(self.power._client, node,
+ retry_connection=False,
+ expect_errors=True)
+ self.assertEqual(4, mock_commands.call_count)
+
+ node.refresh()
+ self.assertNotIn('agent_secret_token', node.driver_internal_info)
+ self.assertNotIn('agent_url', node.driver_internal_info)
+
+ @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_timeout(self, mock_reboot, mock_commands):
+ mock_commands.side_effect = exception.AgentConnectionFailed
+ with task_manager.acquire(self.context, self.node.id) as task:
+ node = task.node
+ self.assertRaisesRegex(exception.PowerStateFailure,
+ 'Agent failed to come back',
+ self.power.reboot, task, timeout=0.001)
+ mock_commands.assert_called_with(self.power._client, node,
+ retry_connection=False,
+ expect_errors=True)
+
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_another_state(self, mock_reboot):
+ with task_manager.acquire(self.context, self.node.id) as task:
+ task.node.provision_state = states.DEPLOYWAIT
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, task.node)
+
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_into_instance(self, mock_reboot):
+ with task_manager.acquire(self.context, self.node.id) as task:
+ del task.node.driver_internal_info['deployment_reboot']
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, task.node)
diff --git a/ironic/tests/unit/drivers/modules/test_console_utils.py b/ironic/tests/unit/drivers/modules/test_console_utils.py
index 94e8a260d..752fa5fd1 100644
--- a/ironic/tests/unit/drivers/modules/test_console_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_console_utils.py
@@ -19,6 +19,7 @@
import errno
import fcntl
+import ipaddress
import os
import random
import signal
@@ -31,7 +32,6 @@ from unittest import mock
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_service import loopingcall
-from oslo_utils import netutils
import psutil
from ironic.common import exception
@@ -223,7 +223,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase):
generated_url = (
console_utils.get_shellinabox_console_url(self.info['port']))
console_host = CONF.my_ip
- if netutils.is_valid_ipv6(console_host):
+ if ipaddress.ip_address(console_host).version == 6:
console_host = '[%s]' % console_host
http_url = "%s://%s:%s" % (scheme, console_host, self.info['port'])
self.assertEqual(http_url, generated_url)
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index a982128ce..0307fd8d2 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -582,6 +582,34 @@ class GetPxeBootConfigTestCase(db_base.DbTestCase):
result = utils.get_pxe_boot_file(self.node)
self.assertEqual('bios-bootfile', result)
+ def test_get_ipxe_boot_file(self):
+ self.config(ipxe_bootfile_name='meow', group='pxe')
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('meow', result)
+
+ def test_get_ipxe_boot_file_uefi(self):
+ self.config(uefi_ipxe_bootfile_name='ipxe-uefi-bootfile', group='pxe')
+ properties = {'capabilities': 'boot_mode:uefi'}
+ self.node.properties = properties
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('ipxe-uefi-bootfile', result)
+
+ def test_get_ipxe_boot_file_other_arch(self):
+ arch_names = {'aarch64': 'ipxe-aa64.efi',
+ 'x86_64': 'ipxe.kpxe'}
+ self.config(ipxe_bootfile_name_by_arch=arch_names, group='pxe')
+ properties = {'cpu_arch': 'aarch64', 'capabilities': 'boot_mode:uefi'}
+ self.node.properties = properties
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('ipxe-aa64.efi', result)
+
+ def test_get_ipxe_boot_file_fallback(self):
+ self.config(ipxe_bootfile_name=None, group='pxe')
+ self.config(uefi_ipxe_bootfile_name=None, group='pxe')
+ self.config(pxe_bootfile_name='lolcat', group='pxe')
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('lolcat', result)
+
def test_get_pxe_config_template_emtpy_property(self):
self.node.properties = {}
self.config(pxe_config_template_by_arch=self.template_by_arch,
@@ -597,6 +625,28 @@ class GetPxeBootConfigTestCase(db_base.DbTestCase):
result = utils.get_pxe_config_template(node)
self.assertEqual('fake-template', result)
+ def test_get_ipxe_config_template(self):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware')
+ self.assertIn('ipxe_config.template',
+ utils.get_ipxe_config_template(node))
+
+ def test_get_ipxe_config_template_none(self):
+ self.config(ipxe_config_template=None, group='pxe')
+ self.config(pxe_config_template='magical_bootloader',
+ group='pxe')
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware')
+ self.assertEqual('magical_bootloader',
+ utils.get_ipxe_config_template(node))
+
+ def test_get_ipxe_config_template_override_pxe_fallback(self):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_info={'pxe_template': 'magical'})
+ self.assertEqual('magical',
+ utils.get_ipxe_config_template(node))
+
@mock.patch('time.sleep', lambda sec: None)
class OtherFunctionTestCase(db_base.DbTestCase):
@@ -1347,6 +1397,20 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
inst_info, ['kernel', 'ramdisk'])
self.assertEqual(expected_error, str(error))
+ def test_validate_image_properties_boot_iso_conflict(self):
+ instance_info = {
+ 'image_source': 'http://ubuntu',
+ 'boot_iso': 'http://ubuntu.iso',
+ }
+ expected_error = ("An 'image_source' and 'boot_iso' "
+ "parameter may not be specified at "
+ "the same time.")
+ error = self.assertRaises(exception.InvalidParameterValue,
+ utils.validate_image_properties,
+ self.context,
+ instance_info, [])
+ self.assertEqual(expected_error, str(error))
+
class ValidateParametersTestCase(db_base.DbTestCase):
diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py
index ad41d0ba1..e45aee26f 100644
--- a/ironic/tests/unit/drivers/modules/test_ipmitool.py
+++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py
@@ -1057,7 +1057,7 @@ class IPMIToolPrivateMethodTestCase(
'-U', self.info['username'],
'-v',
'-R', '1',
- '-N', '1',
+ '-N', '5',
'-f', awesome_password_filename,
'A', 'B', 'C',
]
@@ -1075,6 +1075,33 @@ class IPMIToolPrivateMethodTestCase(
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
@mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_with_timeout(
+ self, mock_exec, mock_support):
+ args = [
+ 'ipmitool',
+ '-I', 'lanplus',
+ '-H', self.info['address'],
+ '-L', self.info['priv_level'],
+ '-U', self.info['username'],
+ '-v',
+ '-R', '12',
+ '-N', '5',
+ '-f', awesome_password_filename,
+ 'A', 'B', 'C',
+ ]
+
+ mock_support.return_value = True
+ mock_exec.return_value = (None, None)
+
+ self.config(use_ipmitool_retries=True, group='ipmi')
+ ipmi._exec_ipmitool(self.info, 'A B C', kill_on_timeout=True)
+
+ mock_support.assert_called_once_with('timing')
+ mock_exec.assert_called_once_with(*args, timeout=60)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
+ @mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_with_ironic_retries_multiple(
self, mock_exec, mock_support):
@@ -1101,14 +1128,6 @@ class IPMIToolPrivateMethodTestCase(
mock_support.assert_called_once_with('timing')
self.assertEqual(3, mock_exec.call_count)
- def test__exec_ipmitool_wait(self):
- mock_popen = mock.MagicMock()
- mock_popen.poll.side_effect = [1, 1, 1, 1, 1]
- ipmi._exec_ipmitool_wait(1, {'uuid': ''}, mock_popen)
-
- self.assertTrue(mock_popen.terminate.called)
- self.assertTrue(mock_popen.kill.called)
-
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
@mock.patch.object(utils, 'execute', autospec=True)
diff --git a/ironic/tests/unit/drivers/modules/test_ipxe.py b/ironic/tests/unit/drivers/modules/test_ipxe.py
index 4385be74b..1aad6b7a3 100644
--- a/ironic/tests/unit/drivers/modules/test_ipxe.py
+++ b/ironic/tests/unit/drivers/modules/test_ipxe.py
@@ -309,14 +309,9 @@ class iPXEBootTestCase(db_base.DbTestCase):
mock_cache_r_k.assert_called_once_with(
task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
ipxe_enabled=True)
- if uefi:
- mock_pxe_config.assert_called_once_with(
- task, {}, CONF.pxe.uefi_pxe_config_template,
- ipxe_enabled=True)
- else:
- mock_pxe_config.assert_called_once_with(
- task, {}, CONF.pxe.pxe_config_template,
- ipxe_enabled=True)
+ mock_pxe_config.assert_called_once_with(
+ task, {}, CONF.pxe.ipxe_config_template,
+ ipxe_enabled=True)
def test_prepare_ramdisk(self):
self.node.provision_state = states.DEPLOYING
@@ -699,7 +694,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.pxe_config_template,
+ task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
@@ -816,7 +811,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.assertFalse(cache_mock.called)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.pxe_config_template,
+ task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
index d980ef8ca..17ff14153 100644
--- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
+++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
@@ -348,8 +348,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'node': self.node.uuid,
'params': log_params,
}
- uuid_dict_returned = {'root uuid': '12345678-87654321'}
- deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': '12345678-87654321'}
+ deploy_mock.return_value = deployment_uuids
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -362,7 +362,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
- self.assertEqual(uuid_dict_returned, retval)
+ self.assertEqual(deployment_uuids, retval)
mock_disk_layout.assert_called_once_with(task.node, mock.ANY)
@mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
@@ -392,8 +392,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'node': self.node.uuid,
'params': log_params,
}
- uuid_dict_returned = {'disk identifier': '87654321'}
- deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'disk identifier': '87654321'}
+ deploy_mock.return_value = deployment_uuids
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
@@ -406,7 +406,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
- self.assertEqual(uuid_dict_returned, retval)
+ self.assertEqual(deployment_uuids, retval)
def _test_get_deploy_info(self, extra_instance_info=None):
if extra_instance_info is None:
@@ -489,8 +489,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
driver_internal_info = {'agent_url': 'http://1.2.3.4:1234'}
self.node.driver_internal_info = driver_internal_info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- continue_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ continue_deploy_mock.return_value = deployment_uuids
expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
with task_manager.acquire(self.context, self.node.uuid,
@@ -504,7 +504,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertEqual(
'some-root-uuid',
task.node.driver_internal_info['root_uuid_or_disk_id'])
- self.assertEqual(ret_val, uuid_dict_returned)
+ self.assertEqual(ret_val, deployment_uuids)
@mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
def test_do_agent_iscsi_deploy_preserve_ephemeral(self,
@@ -517,8 +517,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'agent_url': 'http://1.2.3.4:1234'}
self.node.driver_internal_info = driver_internal_info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- continue_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ continue_deploy_mock.return_value = deployment_uuids
expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
with task_manager.acquire(self.context, self.node.uuid,
@@ -831,54 +831,31 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.assertNotIn(
'deployment_reboot', task.node.driver_internal_info)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'configure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'remove_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot,
- 'prepare_instance',
- spec_set=True, autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
- @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- def test_deploy_storage_check_write_image_false(self,
- mock_cache_instance_image,
- mock_check_image_size,
- mock_node_power_action,
- mock_prepare_instance,
- mock_remove_network,
- mock_tenant_network,
- mock_write):
+ def test_deploy_storage_should_write_image_false(
+ self, mock_write, mock_node_power_action):
mock_write.return_value = False
self.node.provision_state = states.DEPLOYING
self.node.deploy_step = {
'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
self.node.save()
- with task_manager.acquire(self.context,
- self.node.uuid, shared=False) as task:
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
ret = task.driver.deploy.deploy(task)
self.assertIsNone(ret)
- self.assertFalse(mock_cache_instance_image.called)
- self.assertFalse(mock_check_image_size.called)
- mock_remove_network.assert_called_once_with(mock.ANY, task)
- mock_tenant_network.assert_called_once_with(mock.ANY, task)
- mock_prepare_instance.assert_called_once_with(mock.ANY, task)
- self.assertEqual(2, mock_node_power_action.call_count)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
+ self.assertFalse(mock_node_power_action.called)
@mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
@mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'continue_deploy',
+ @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'write_image',
autospec=True)
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_deploy_fast_track(self, power_mock, mock_pxe_instance,
- mock_is_fast_track, continue_deploy_mock,
+ mock_is_fast_track, write_image_mock,
cache_image_mock, check_image_size_mock):
mock_is_fast_track.return_value = True
self.node.target_provision_state = states.ACTIVE
@@ -889,16 +866,17 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
- task.driver.deploy.deploy(task)
+ result = task.driver.deploy.deploy(task)
+ self.assertIsNone(result)
self.assertFalse(power_mock.called)
self.assertFalse(mock_pxe_instance.called)
task.node.refresh()
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
cache_image_mock.assert_called_with(mock.ANY, task.node)
check_image_size_mock.assert_called_with(task)
- continue_deploy_mock.assert_called_with(mock.ANY, task)
+ self.assertFalse(write_image_mock.called)
@mock.patch.object(noop_storage.NoopStorage, 'detach_volumes',
autospec=True)
@@ -995,90 +973,95 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
agent_execute_clean_step_mock.assert_called_once_with(
task, {'some-step': 'step-info'}, 'clean')
- @mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_netboot(self, do_agent_iscsi_deploy_mock,
- reboot_and_finish_deploy_mock):
+ def test_write_image(self, do_agent_iscsi_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ do_agent_iscsi_deploy_mock.return_value = deployment_uuids
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.deploy.write_image(task)
+ do_agent_iscsi_deploy_mock.assert_called_once_with(
+ task, task.driver.deploy._client)
+ self.assertEqual(
+ task.node.driver_internal_info['deployment_uuids'],
+ deployment_uuids)
+
+ @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
+ autospec=True)
+ @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
+ def test_write_image_bfv(self, do_agent_iscsi_deploy_mock,
+ should_write_image_mock):
+ should_write_image_mock.return_value = False
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.deploy.write_image(task)
+ self.assertFalse(do_agent_iscsi_deploy_mock.called)
+
+ def test_prepare_instance_boot_netboot(self):
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ self.node.instance_info = {
+ 'capabilities': {'boot_option': 'netboot'}}
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
+ self.node.save()
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.object(
task.driver.boot, 'prepare_instance') as m_prep_instance:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- mock.ANY, task)
+ task.driver.deploy.prepare_instance_boot(task)
m_prep_instance.assert_called_once_with(task)
@mock.patch.object(fake.FakeManagement, 'set_boot_device', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
- @mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
- @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_localboot(self, do_agent_iscsi_deploy_mock,
- configure_local_boot_mock,
- reboot_and_finish_deploy_mock,
- set_boot_device_mock):
+ def test_prepare_instance_boot_localboot(self, configure_local_boot_mock,
+ set_boot_device_mock):
- self.node.instance_info = {
- 'capabilities': {'boot_option': 'local'}}
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
+ task.driver.deploy.prepare_instance_boot(task)
configure_local_boot_mock.assert_called_once_with(
task.driver.deploy, task, root_uuid='some-root-uuid',
efi_system_part_uuid=None, prep_boot_part_uuid=None)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- task.driver.deploy, task)
set_boot_device_mock.assert_called_once_with(
mock.ANY, task, device=boot_devices.DISK, persistent=True)
@mock.patch.object(fake.FakeManagement, 'set_boot_device', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
- @mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
- @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_localboot_uefi(self, do_agent_iscsi_deploy_mock,
- configure_local_boot_mock,
- reboot_and_finish_deploy_mock,
- set_boot_device_mock):
-
+ def test_prepare_instance_boot_localboot_uefi(
+ self, configure_local_boot_mock, set_boot_device_mock):
+ deployment_uuids = {'root uuid': 'some-root-uuid',
+ 'efi system partition uuid': 'efi-part-uuid'}
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid',
- 'efi system partition uuid': 'efi-part-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
+ task.driver.deploy.prepare_instance_boot(task)
configure_local_boot_mock.assert_called_once_with(
task.driver.deploy, task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-part-uuid', prep_boot_part_uuid=None)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- task.driver.deploy, task)
set_boot_device_mock.assert_called_once_with(
mock.ANY, task, device=boot_devices.DISK, persistent=True)
@@ -1157,49 +1140,6 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.node.uuid, shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
- autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'configure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'remove_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot,
- 'prepare_instance',
- spec_set=True, autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
- @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- def test_deploy_storage_check_write_image_false_with_smartnic_port(
- self, mock_cache_instance_image, mock_check_image_size,
- mock_node_power_action, mock_prepare_instance,
- mock_remove_network, mock_tenant_network, mock_write,
- power_on_node_if_needed_mock, restore_power_state_mock):
- mock_write.return_value = False
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(
- self.context, self.node.uuid, shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- ret = task.driver.deploy.deploy(task)
- self.assertIsNone(ret)
- self.assertFalse(mock_cache_instance_image.called)
- self.assertFalse(mock_check_image_size.called)
- mock_remove_network.assert_called_once_with(mock.ANY, task)
- mock_tenant_network.assert_called_once_with(mock.ANY, task)
- mock_prepare_instance.assert_called_once_with(mock.ANY, task)
- self.assertEqual(2, mock_node_power_action.call_count)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-
# Cleanup of iscsi_deploy with pxe boot interface
class CleanUpFullFlowTestCase(db_base.DbTestCase):
diff --git a/ironic/tests/unit/drivers/test_drac.py b/ironic/tests/unit/drivers/test_drac.py
index 748c5e466..8a551070b 100644
--- a/ironic/tests/unit/drivers/test_drac.py
+++ b/ironic/tests/unit/drivers/test_drac.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from oslo_utils import uuidutils
+
from ironic.conductor import task_manager
from ironic.drivers.modules import agent
from ironic.drivers.modules import drac
@@ -42,7 +44,7 @@ class IDRACHardwareTestCase(db_base.DbTestCase):
'no-inspect'],
enabled_network_interfaces=['flat', 'neutron', 'noop'],
enabled_raid_interfaces=[
- 'idrac', 'idrac-wsman', 'no-raid'],
+ 'idrac', 'idrac-wsman', 'no-raid', 'agent'],
enabled_vendor_interfaces=[
'idrac', 'idrac-wsman', 'no-vendor'],
enabled_bios_interfaces=[
@@ -108,11 +110,14 @@ class IDRACHardwareTestCase(db_base.DbTestCase):
inspect=inspector.Inspector)
def test_override_with_raid(self):
- node = obj_utils.create_test_node(self.context, driver='idrac',
- raid_interface='no-raid')
- with task_manager.acquire(self.context, node.id) as task:
- self._validate_interfaces(task.driver,
- raid=noop.NoRAID)
+ for iface, impl in [('agent', agent.AgentRAID),
+ ('no-raid', noop.NoRAID)]:
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='idrac',
+ raid_interface=iface)
+ with task_manager.acquire(self.context, node.id) as task:
+ self._validate_interfaces(task.driver, raid=impl)
def test_override_no_vendor(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
diff --git a/ironic/tests/unit/drivers/test_ilo.py b/ironic/tests/unit/drivers/test_ilo.py
index ac719b763..3e8526436 100644
--- a/ironic/tests/unit/drivers/test_ilo.py
+++ b/ironic/tests/unit/drivers/test_ilo.py
@@ -16,10 +16,11 @@
Test class for iLO Drivers
"""
+from oslo_utils import uuidutils
+
from ironic.conductor import task_manager
from ironic.drivers import ilo
from ironic.drivers.modules import agent
-from ironic.drivers.modules.ilo import management
from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
@@ -187,16 +188,6 @@ class Ilo5HardwareTestCase(db_base.DbTestCase):
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context, driver='ilo5')
with task_manager.acquire(self.context, node.id) as task:
- self.assertIsInstance(task.driver.raid, raid.Ilo5RAID)
- self.assertIsInstance(task.driver.management,
- management.Ilo5Management)
-
- def test_override_with_no_raid(self):
- self.config(enabled_raid_interfaces=['no-raid', 'ilo5'])
- node = obj_utils.create_test_node(self.context, driver='ilo5',
- raid_interface='no-raid')
- with task_manager.acquire(self.context, node.id) as task:
- self.assertIsInstance(task.driver.raid, noop.NoRAID)
self.assertIsInstance(task.driver.boot,
ilo.boot.IloVirtualMediaBoot)
self.assertIsInstance(task.driver.console,
@@ -209,7 +200,19 @@ class Ilo5HardwareTestCase(db_base.DbTestCase):
ilo.management.IloManagement)
self.assertIsInstance(task.driver.power,
ilo.power.IloPower)
+ self.assertIsInstance(task.driver.raid, raid.Ilo5RAID)
self.assertIsInstance(task.driver.rescue,
noop.NoRescue)
self.assertIsInstance(task.driver.vendor,
ilo.vendor.VendorPassthru)
+
+ def test_override_raid(self):
+ self.config(enabled_raid_interfaces=['agent', 'no-raid', 'ilo5'])
+ for iface, impl in [('agent', agent.AgentRAID),
+ ('no-raid', noop.NoRAID)]:
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='ilo5',
+ raid_interface=iface)
+ with task_manager.acquire(self.context, node.id) as task:
+ self.assertIsInstance(task.driver.raid, impl)
diff --git a/ironic/tests/unit/objects/test_fields.py b/ironic/tests/unit/objects/test_fields.py
index 02ce1222d..35cc050e6 100644
--- a/ironic/tests/unit/objects/test_fields.py
+++ b/ironic/tests/unit/objects/test_fields.py
@@ -13,9 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import hashlib
-import inspect
-
from ironic.common import exception
from ironic.objects import fields
from ironic.tests import base as test_base
@@ -73,8 +70,6 @@ class TestStringFieldThatAcceptsCallable(test_base.TestCase):
def test_default_function():
return "default value"
- self.test_default_function_hash = hashlib.md5(
- inspect.getsource(test_default_function).encode()).hexdigest()
self.field = fields.StringFieldThatAcceptsCallable(
default=test_default_function)
@@ -102,8 +97,8 @@ class TestStringFieldThatAcceptsCallable(test_base.TestCase):
self.field.coerce('obj', 'attr', None))
def test__repr__includes_default_function_name_and_source_hash(self):
- expected = ('StringAcceptsCallable(default=test_default_function-%s,'
- 'nullable=False)' % self.test_default_function_hash)
+ expected = ('StringAcceptsCallable(default=<function '
+ 'test_default_function>,nullable=False)')
self.assertEqual(expected, repr(self.field))
diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py
index a72dc6ea5..707d09e8d 100644
--- a/ironic/tests/unit/objects/test_node.py
+++ b/ironic/tests/unit/objects/test_node.py
@@ -171,7 +171,6 @@ class TestNodeObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
mock_update_node.assert_called_once_with(
uuid, {'properties': {"fake": "property"},
'driver': 'fake-driver',
- 'driver_internal_info': {},
'version': objects.Node.VERSION})
self.assertEqual(self.context, n._context)
res_updated_at = (n.updated_at).replace(tzinfo=None)
@@ -221,7 +220,6 @@ class TestNodeObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
{
'properties': {'fake': 'property'},
'driver': 'fake-driver',
- 'driver_internal_info': {},
'version': objects.Node.VERSION,
'maintenance_reason':
maintenance_reason[
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 930205c6b..3f483d1fe 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -61,7 +61,7 @@ os-client-config==2.1.0
os-service-types==1.7.0
os-traits==0.4.0
osc-lib==2.0.0
-oslo.concurrency==3.26.0
+oslo.concurrency==4.2.0
oslo.config==5.2.0
oslo.context==2.19.2
oslo.db==4.40.0
diff --git a/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml b/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml
new file mode 100644
index 000000000..823991020
--- /dev/null
+++ b/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Adds functionality to allow a user to supply a node
+ ``instance_info/boot_iso`` parameter on machines utilizing the
+ ``redfish-virtual-media`` boot interface. When combined with the
+ ``ramdisk`` deployment interface, this allows an instance to boot
+ into a user-supplied ISO image.
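For illustration, a minimal sketch of wiring this up with the baremetal CLI; the node identifier and ISO URL are placeholders::

    openstack baremetal node set <node> \
        --boot-interface redfish-virtual-media \
        --deploy-interface ramdisk \
        --instance-info boot_iso=http://example.com/my-image.iso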
diff --git a/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml b/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml
new file mode 100644
index 000000000..549a78b21
--- /dev/null
+++ b/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The new **experimental** ``agent`` power interface allows limited
+ provisioning operations on nodes without BMC credentials. See `story
+ 2007771 <https://storyboard.openstack.org/#!/story/2007771>`_ for details.
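As an experimental interface, ``agent`` is not enabled by default. A minimal sketch of enabling it in ``ironic.conf``, alongside an existing interface such as ``ipmitool``::

    [DEFAULT]
    enabled_power_interfaces = ipmitool,agent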
diff --git a/releasenotes/notes/agent-raid-647acfd599e83476.yaml b/releasenotes/notes/agent-raid-647acfd599e83476.yaml
new file mode 100644
index 000000000..84248fa10
--- /dev/null
+++ b/releasenotes/notes/agent-raid-647acfd599e83476.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ The ``agent`` RAID interface now supports building RAID as a deploy step
+ ``apply_configuration``.
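A minimal sketch of invoking the new step through a deploy template; the template name, priority, and RAID layout are illustrative only::

    openstack baremetal deploy template create CUSTOM_RAID1 \
        --steps '[{"interface": "raid", "step": "apply_configuration",
                   "args": {"raid_config": {"logical_disks": [
                       {"size_gb": "MAX", "raid_level": "1"}]}},
                   "priority": 85}]'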
diff --git a/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml b/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml
new file mode 100644
index 000000000..7db6a1ff8
--- /dev/null
+++ b/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Allows deleting nodes with a broken driver unless they require stopping
+ the serial console.
diff --git a/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml b/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml
new file mode 100644
index 000000000..d45da4114
--- /dev/null
+++ b/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ The ``deploy`` deploy step of the ``direct`` deploy interface has been
+ split into three deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) downloads the user image from inside
+ the ramdisk and writes it to the disk.
+
+ * ``prepare_instance_boot`` (priority 60) prepares the boot device and
+ writes the bootloader (if needed).
+
+ Priorities 81 to 99 can be used for in-band deploy steps that run before
+ the image is written. Priorities 61 to 79 can be used for in-band deploy
+ steps that modify the written image before the bootloader is installed.
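For example, an in-band deploy step with a priority in the 81 to 99 window runs after ``deploy`` (100) but before ``write_image`` (80); a sketch of such a step entry in a deploy template, with a hypothetical step name::

    {"interface": "deploy", "step": "my_pre_image_step",
     "args": {}, "priority": 90}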
diff --git a/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml b/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml
new file mode 100644
index 000000000..0a7bf25e0
--- /dev/null
+++ b/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes updating driver fields for nodes with a broken driver. This is
+ required to be able to set maintenance for such nodes.
diff --git a/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml b/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml
new file mode 100644
index 000000000..acf5daccf
--- /dev/null
+++ b/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ Operators upgrading from earlier versions using PXE should explicitly set
+ ``[pxe]ipxe_bootfile_name``, ``[pxe]uefi_ipxe_bootfile_name``, and
+ possibly ``[pxe]ipxe_bootfile_name_by_arch`` settings, as well as an
+ iPXE-specific ``[pxe]ipxe_config_template`` override, if required.
+
+ Leaving ``[pxe]ipxe_config_template`` unset will result in the
+ ``[pxe]pxe_config_template`` value being used. The default points to the
+ supplied standard iPXE template, so only operators with highly customized
+ templates should have to tune this setting.
+fixes:
+ - |
+ Addresses the previous inability to explicitly set different bootloaders
+ for ``iPXE``- and ``PXE``-based boot operations via their respective
+ ``ipxe`` and ``pxe`` boot interfaces.
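A sketch of the resulting ``ironic.conf`` layout; the file names shown are the usual upstream defaults and may differ per deployment::

    [pxe]
    ipxe_bootfile_name = undionly.kpxe
    uefi_ipxe_bootfile_name = ipxe.efi
    # Optional: only needed for highly customized templates.
    # ipxe_config_template = /etc/ironic/my_ipxe_config.template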
diff --git a/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml b/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml
new file mode 100644
index 000000000..bb3f9ae52
--- /dev/null
+++ b/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds support for boot mode retrieval and setting with the ``ilo`` and
+ ``ilo5`` hardware types.
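A rough sketch of how this surfaces through the standard management interface, assuming a ``task`` already acquired via the task manager::

    from ironic.common import boot_modes

    # Retrieve the current boot mode from the BMC.
    current = task.driver.management.get_boot_mode(task)

    # Switch the node to UEFI.
    task.driver.management.set_boot_mode(task, mode=boot_modes.UEFI)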
diff --git a/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml b/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml
new file mode 100644
index 000000000..d3867a344
--- /dev/null
+++ b/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ Adds support for running custom in-band deploy steps when provisioning.
+ Step priorities from 41 to 59 can be used for steps that run after
+ the image is written and the bootloader is installed.
+deprecations:
+ - |
+ Running the whole deployment process as a monolithic ``deploy.deploy``
+ deploy step is now deprecated. In a future release this step will only be
+ used to prepare the deployment and start the agent, and its special
+ handling will be removed. All third-party deploy interfaces must be
+ updated to provide real deploy steps instead and to set the
+ ``has_decomposed_deploy_steps`` attribute to ``True`` at the deploy
+ interface level.
+other:
+ - |
+ As part of the agent deploy interface refactoring, breaking changes will
+ be made to implementations of ``AgentDeploy`` and ``ISCSIDeploy``.
+ Third-party deploy interfaces must be updated to inherit
+ ``HeartbeatMixin``, ``AgentBaseMixin``, or ``AgentDeployMixin``
+ from ``ironic.drivers.modules.agent_base`` instead, since their API is
+ considered more stable.
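A skeleton of a third-party deploy interface following this advice; the class name is illustrative and required abstract methods are omitted::

    from ironic.drivers import base
    from ironic.drivers.modules import agent_base

    class MyDeploy(agent_base.AgentBaseMixin, agent_base.HeartbeatMixin,
                   base.DeployInterface):

        @base.deploy_step(priority=100)
        def deploy(self, task):
            # Boot the deploy ramdisk; subsequent deploy steps
            # (including in-band ones) perform the actual work.
            ...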
diff --git a/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml b/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml
new file mode 100644
index 000000000..b530dd807
--- /dev/null
+++ b/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ Adds the ``[deploy]erase_skip_read_only`` configuration option, which
+ tells the ironic-python-agent that it is okay to skip read-only block
+ devices when performing an ``erase_devices`` cleaning operation. This
+ requires ironic-python-agent version 6.0.0 or greater.
+other:
+ - |
+ Starting in ironic-python-agent 6.0.0, metadata erasure of read-only
+ devices is skipped by default.
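For example, to opt in (requires ironic-python-agent 6.0.0 or greater)::

    [deploy]
    erase_skip_read_only = true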
diff --git a/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml b/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml
new file mode 100644
index 000000000..413224b02
--- /dev/null
+++ b/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ When Ironic retries IPMI commands, the configured ``min_command_interval``
+ is now used instead of a default value of ``1``, which may be too short
+ for some BMCs.
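Operators with slow BMCs can raise the interval in ``ironic.conf``; the value below is only an example::

    [ipmi]
    min_command_interval = 10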
diff --git a/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml b/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml
new file mode 100644
index 000000000..fec61904c
--- /dev/null
+++ b/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml
@@ -0,0 +1,28 @@
+---
+features:
+ - |
+ The ``deploy`` deploy step of the ``iscsi`` deploy interface has been
+ split into three deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) writes the image to the disk exposed
+ via iSCSI.
+
+ * ``prepare_instance_boot`` (priority 60) prepares the boot device and
+ writes the bootloader (if needed).
+
+ Priorities 81 to 99 can be used for in-band deploy steps that run before
+ the image is written. Priorities 61 to 79 can be used for in-band deploy
+ steps that modify the written image before the bootloader is installed.
+ - |
+ The ``deploy`` deploy step of the ``ansible`` deploy interface has been
+ split into two deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) writes the image to the disk and configures
+ the bootloader.
+
+ Priorities 81 to 99 can be used for in-band deploy steps that run before
+ the image is written.
diff --git a/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml b/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml
new file mode 100644
index 000000000..0b90cfda7
--- /dev/null
+++ b/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes missing ``agent`` RAID interface compatibility for the ``ilo5`` and
+ ``idrac`` hardware types, which prevented software RAID from working with
+ them.
diff --git a/releasenotes/notes/no-power-on-842b21d55b07a632.yaml b/releasenotes/notes/no-power-on-842b21d55b07a632.yaml
new file mode 100644
index 000000000..71f4dc9eb
--- /dev/null
+++ b/releasenotes/notes/no-power-on-842b21d55b07a632.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - |
+ A new method ``supports_power_sync`` has been added to ``PowerInterface``.
+ If it returns ``False``, the conductor will not try to assert the power
+ state of the node, merely recording the returned state instead.
+ - |
+ The base agent deploy interface code now correctly handles power interfaces
+ that do not support the ``power on`` action but support ``reboot``.
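A sketch of a power interface opting out of power-state assertion; other required ``PowerInterface`` methods are omitted for brevity::

    from ironic.drivers import base

    class MyPower(base.PowerInterface):

        def supports_power_sync(self, task):
            # Ask the conductor to record, rather than enforce,
            # the node's power state.
            return False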
diff --git a/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml b/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml
new file mode 100644
index 000000000..d10f42b2b
--- /dev/null
+++ b/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds a ``network_data`` property to the node: a dictionary that represents
+ the node's static network configuration. The Ironic API performs formal
+ JSON validation of the node's ``network_data`` content against a
+ user-supplied JSON schema at the driver validation step.
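A minimal illustrative ``network_data`` value, assuming the commonly used Nova ``network_data.json`` layout; all identifiers and addresses are examples::

    {
        "links": [{"id": "port-eth0", "type": "phy",
                   "ethernet_mac_address": "52:54:00:aa:bb:cc"}],
        "networks": [{"id": "network0", "type": "ipv4", "link": "port-eth0",
                      "ip_address": "192.0.2.10",
                      "netmask": "255.255.255.0", "routes": []}],
        "services": []
    }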
diff --git a/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml b/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml
new file mode 100644
index 000000000..9ee2ca70a
--- /dev/null
+++ b/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Removes stale agent tokens when rebooting nodes using the API. This
+ prevents lookup failures for nodes that get rebooted between fast-track
+ operations.
diff --git a/requirements.txt b/requirements.txt
index 50053460e..b1b1011d5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -16,7 +16,7 @@ python-swiftclient>=3.2.0 # Apache-2.0
pytz>=2013.6 # MIT
stevedore>=1.20.0 # Apache-2.0
pysendfile>=2.0.0;sys_platform!='win32' # MIT
-oslo.concurrency>=3.26.0 # Apache-2.0
+oslo.concurrency>=4.2.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.db>=4.40.0 # Apache-2.0
@@ -34,7 +34,6 @@ pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
requests>=2.14.2 # Apache-2.0
rfc3986>=0.3.1 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
-WSME>=0.9.3 # MIT
Jinja2>=2.10 # BSD License (3 clause)
keystonemiddleware>=4.17.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 1101c09b5..c314e21a5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -121,6 +121,7 @@ ironic.hardware.interfaces.network =
noop = ironic.drivers.modules.network.noop:NoopNetwork
ironic.hardware.interfaces.power =
+ agent = ironic.drivers.modules.agent_power:AgentPower
fake = ironic.drivers.modules.fake:FakePower
ibmc = ironic.drivers.modules.ibmc.power:IBMCPower
idrac = ironic.drivers.modules.drac.power:DracPower
diff --git a/tools/bandit.yml b/tools/bandit.yml
index d99694d39..028d1a214 100644
--- a/tools/bandit.yml
+++ b/tools/bandit.yml
@@ -89,7 +89,6 @@ tests:
# (optional) list skipped test IDs here, eg '[B101, B406]':
skips:
- B104
- - B303
- B604
### (optional) plugin settings - some test plugins require configuration data
diff --git a/tox.ini b/tox.ini
index 1c2aefc6f..67ffcad0e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-minversion = 3.1.0
+minversion = 3.2.1
skipsdist = True
envlist = py3,pep8
ignore_basepython_conflict=true
@@ -132,7 +132,6 @@ per-file-ignores =
ironic/tests/base.py:E402
ironic/tests/unit/api/*:H210
ironic/tests/unit/conductor/test_deployments.py:H210
- ironic/tests/unit/conductor/test_task_manager.py:H210
ironic/tests/unit/conductor/test_notification_utils.py:H210
ironic/tests/unit/common/*:H210
ironic/tests/unit/drivers/modules/test_console_utils.py:H210
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index fda847d03..526a4b3b2 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -44,17 +44,19 @@
IRONIC_INSPECTOR_TEMPEST_INTROSPECTION_TIMEOUT: 1200
IRONIC_TEMPEST_BUILD_TIMEOUT: 900
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False
- IRONIC_VM_COUNT: 1
+ IRONIC_VM_COUNT: 2
IRONIC_VM_EPHEMERAL_DISK: 1
IRONIC_VM_SPECS_RAM: 3072
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
# NOTE(dtantsur): in some jobs we end up with 12 disks total, so reduce
# each of them. We don't need all 10 GiB for CirrOS anyway.
IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 2
IRONIC_DEFAULT_DEPLOY_INTERFACE: iscsi
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
SERVICE_TIMEOUT: 90
devstack_plugins:
@@ -65,18 +67,15 @@
'{{ devstack_base_dir }}/ironic-bm-logs': 'logs'
'{{ devstack_base_dir }}/data/networking-generic-switch/netmiko_session.log': 'logs'
devstack_services:
- q-agt: false
- q-dhcp: false
- q-l3: false
- q-meta: false
- q-metering: false
- q-svc: false
- neutron-api: true
- neutron-agent: true
- neutron-dhcp: true
- neutron-l3: true
- neutron-metadata-agent: true
- neutron-metering: true
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ q-svc: true
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
c-api: False
c-bak: False
@@ -121,6 +120,7 @@
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_RPC_TRANSPORT: json-rpc
IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_CPU: 1
IRONIC_VM_COUNT: 6
IRONIC_VM_VOLUME_COUNT: 2
# We're using a lot of disk space in this job. Some testing nodes have
@@ -182,6 +182,7 @@
IRONIC_VM_COUNT: 6
IRONIC_VM_VOLUME_COUNT: 2
IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_CPU: 1
# We're using a lot of disk space in this job. Some testing nodes have
# a small root partition, so use /opt which is mounted from a bigger
# ephemeral partition on such nodes
@@ -208,7 +209,6 @@
name: ironic-tempest-partition-bios-redfish-pxe
description: "Deploy ironic node over PXE using BIOS boot mode"
parent: ironic-base
- timeout: 5400
required-projects:
- openstack/sushy-tools
vars:
@@ -282,7 +282,6 @@
name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
parent: ironic-base
- timeout: 9600
vars:
devstack_localrc:
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
@@ -303,7 +302,6 @@
name: ironic-tempest-wholedisk-bios-snmp-pxe
description: SNMP power, no-op management, netboot and whole disk images.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
IRONIC_ENABLED_HARDWARE_TYPES: snmp
@@ -317,28 +315,27 @@
description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool
parent: ironic-base
nodeset: openstack-single-node-focal
- timeout: 5400
vars:
devstack_localrc:
IRONIC_BOOT_MODE: uefi
- IRONIC_VM_SPECS_RAM: 3096
+ IRONIC_VM_SPECS_RAM: 4096
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_DEFAULT_BOOT_OPTION: netboot
- job:
name: ironic-tempest-ipa-partition-pxe_ipmitool
- description: ironic-tempest-ipa-partition-pxe_ipmitool
+ description: ironic-tempest-ipa-partition-pxe_ipmitool that also tests cleaning.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
IRONIC_DEFAULT_BOOT_OPTION: netboot
+ IRONIC_AUTOMATED_CLEAN_ENABLED: True
+
- job:
name: ironic-tempest-bfv
description: ironic-tempest-bfv
parent: ironic-base
- timeout: 9600
vars:
tempest_test_regex: baremetal_boot_from_volume
devstack_localrc:
@@ -389,7 +386,6 @@
name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
- timeout: 5400
vars:
devstack_localrc:
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
@@ -401,7 +397,6 @@
name: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
description: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
- timeout: 5400
vars:
devstack_localrc:
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
@@ -415,7 +410,6 @@
name: ironic-tempest-functional-python3
description: ironic-tempest-functional-python3
parent: ironic-base
- timeout: 5400
pre-run: playbooks/ci-workarounds/etc-neutron.yaml
vars:
tempest_test_regex: ironic_tempest_plugin.tests.api
@@ -446,19 +440,12 @@
q-meta: False
q-metering: False
q-svc: False
- neutron-api: False
- neutron-agent: False
- neutron-dhcp: False
- neutron-l3: False
- neutron-metadata-agent: False
- neutron-metering: False
- job:
name: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
description: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
parent: tempest-multinode-full-py3
pre-run: playbooks/ci-workarounds/pre.yaml
- timeout: 10800
required-projects:
- openstack/ironic
- openstack/ironic-python-agent
@@ -483,7 +470,7 @@
vars:
tox_envlist: all
tempest_concurrency: 3
- tempest_test_regex: "(ironic_tempest_plugin.tests.scenario|test_schedule_to_all_nodes)"
+ tempest_test_regex: "ironic_tempest_plugin.tests.scenario"
tempest_test_timeout: 2400
devstack_localrc:
BUILD_TIMEOUT: 2400
@@ -513,11 +500,12 @@
IRONIC_TEMPEST_BUILD_TIMEOUT: 600
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
IRONIC_USE_LINK_LOCAL: True
- IRONIC_VM_COUNT: 6
+ IRONIC_VM_COUNT: 3
IRONIC_VM_EPHEMERAL_DISK: 0
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
- IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_RAM: 512
IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 1
OVS_BRIDGE_MAPPINGS: 'mynetwork:brbm,public:br-infra'
OVS_PHYSICAL_BRIDGE: brbm
PHYSICAL_NETWORK: mynetwork
@@ -525,6 +513,7 @@
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vlan
Q_PLUGIN: ml2
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
TENANT_VLAN_RANGE: 100:150
@@ -574,6 +563,9 @@
q-meta: True
q-metering: True
q-svc: True
+ ovn-controller: False
+ ovn-northd: False
+ q-ovn-metadata-agent: False
rabbit: True
group-vars:
subnode:
@@ -591,14 +583,17 @@
IRONIC_PROVISION_NETWORK_NAME: ironic-provision
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_USE_LINK_LOCAL: True
- IRONIC_VM_COUNT: 6
+ IRONIC_VM_COUNT: 3
IRONIC_VM_EPHEMERAL_DISK: 0
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
IRONIC_VM_NETWORK_BRIDGE: sub1brbm
- IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_RAM: 512
+ IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 1
OVS_BRIDGE_MAPPINGS: 'mynetwork:sub1brbm,public:br-infra'
OVS_PHYSICAL_BRIDGE: sub1brbm
PHYSICAL_NETWORK: mynetwork
+ Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vlan
VIRT_DRIVER: ironic
PUBLIC_BRIDGE: br-infra
@@ -611,6 +606,9 @@
cinder: False
q-agt: True
+ ovn-controller: False
+ ovn-northd: False
+ q-ovn-metadata-agent: False
n-cpu: True
- job:
@@ -645,7 +643,6 @@
IRONIC_BOOT_MODE: uefi
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_DEFAULT_BOOT_OPTION: netboot
- IRONIC_VM_SPECS_DISK: 10
IRONIC_VM_SPECS_RAM: 4096
- job:
@@ -673,7 +670,6 @@
- job:
name: ironic-tempest-ipa-wholedisk-bios-ipmi-direct-dib
parent: ironic-base
- timeout: 9600
vars:
devstack_services:
s-account: True
@@ -701,19 +697,12 @@
devstack_services:
# NOTE(TheJulia): It seems our devstack plugin does not play well
# with multitenancy and the newer neutron service names.
- neutron: True
- neutron-api: True
- neutron-agent: True
- neutron-dhcp: True
- neutron-l3: True
- neutron-metadata-agent: False
- neutron-metering: False
- q-agt: False
- q-dhcp: False
- q-l3: False
+ q-agt: True
+ q-dhcp: True
+ q-l3: True
q-meta: False
q-metering: False
- q-svc: False
+ q-svc: True
swift: True
devstack_plugins:
ironic: https://opendev.org/openstack/ironic
@@ -732,8 +721,8 @@
IRONIC_IPXE_ENABLED: True
IRONIC_PROVISION_NETWORK_NAME: ironic-provision
OVS_PHYSICAL_BRIDGE: brbm
- NEUTRON_PHYSICAL_NETWORK: mynetwork
- NEUTRON_TENANT_VLAN_RANGE: 100:150
+ PHYSICAL_NETWORK: mynetwork
+ TENANT_VLAN_RANGE: 100:150
IRONIC_ENABLED_NETWORK_INTERFACES: flat,neutron
IRONIC_NETWORK_INTERFACE: neutron
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
@@ -742,12 +731,10 @@
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
IRONIC_VM_EPHEMERAL_DISK: 0
# This will swap and needs to get to tinycore soon.
- IRONIC_VM_COUNT: 2
IRONIC_VM_SPECS_CPU: 2
Q_PLUGIN: ml2
ENABLE_TENANT_VLANS: True
Q_ML2_TENANT_NETWORK_TYPE: vlan
- NEUTRON_TENANT_NETWORK_TYPE: vlan
OVS_BRIDGE_MAPPINGS: "public:br-ex,mynetwork:brbm"
USE_PROVIDER_NETWORKING: True
PUBLIC_PHYSICAL_NETWORK: public
@@ -763,10 +750,9 @@
# Used by devstack/ironic/nova/neutron
- job:
- name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
- description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ name: ironic-tempest-bios-ipmi-direct-tinyipa
+ description: ironic-tempest-wholedisk-bios-ipmi-direct-tinyipa that also tests cleaning.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
@@ -785,6 +771,11 @@
s-proxy: True
- job:
+ name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ description: Alias for ironic-tempest-wholedisk-bios-ipmi-direct-tinyipa
+ parent: ironic-tempest-bios-ipmi-direct-tinyipa
+
+- job:
name: ironic-grenade
parent: grenade
timeout: 10800
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index b08a37500..0c4fe2d91 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -24,11 +24,12 @@
- ironic-tempest-ipa-partition-pxe_ipmitool
- ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ - ironic-tempest-bios-ipmi-direct-tinyipa
- ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
- ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
- ironic-tempest-bfv
- ironic-tempest-ipa-partition-uefi-pxe-grub2
+ - ironic-tempest-ipxe-ipv6
- metalsmith-integration-glance-localboot-centos7
# Non-voting jobs
- ironic-tox-bandit:
@@ -45,8 +46,6 @@
voting: false
- ironic-tempest-pxe_ipmitool-postgres:
voting: false
- - ironic-tempest-ipxe-ipv6:
- voting: false
gate:
queue: ironic
jobs:
@@ -62,11 +61,12 @@
- ironic-tempest-ipa-partition-pxe_ipmitool
- ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ - ironic-tempest-bios-ipmi-direct-tinyipa
- ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
- ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
- ironic-tempest-bfv
- ironic-tempest-ipa-partition-uefi-pxe-grub2
+ - ironic-tempest-ipxe-ipv6
- metalsmith-integration-glance-localboot-centos7
experimental:
jobs: